drm/i915: Protect active fences on i915
drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include <linux/swap.h>
33 #include <linux/pci.h>
34
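/* GPU cache domains: everything other than the CPU and GTT domains lives on
 * the GPU side; flushing those goes through the ring (see i915_gem_flush())
 * rather than through a chipset flush.
 */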
35 #define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36
37 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
40 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
41                                              int write);
42 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43                                                      uint64_t offset,
44                                                      uint64_t size);
45 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
47 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
48 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50                                            unsigned alignment);
51 static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
52 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
53 static int i915_gem_evict_something(struct drm_device *dev);
54 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file_priv);
57
58 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
59                      unsigned long end)
60 {
61         drm_i915_private_t *dev_priv = dev->dev_private;
62
63         if (start >= end ||
64             (start & (PAGE_SIZE - 1)) != 0 ||
65             (end & (PAGE_SIZE - 1)) != 0) {
66                 return -EINVAL;
67         }
68
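        /* Hand the [start, end) range of the aperture to the DRM range
         * manager; GTT bindings for objects are carved out of gtt_space.
         */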
69         drm_mm_init(&dev_priv->mm.gtt_space, start,
70                     end - start);
71
72         dev->gtt_total = (uint32_t) (end - start);
73
74         return 0;
75 }
76
77 int
78 i915_gem_init_ioctl(struct drm_device *dev, void *data,
79                     struct drm_file *file_priv)
80 {
81         struct drm_i915_gem_init *args = data;
82         int ret;
83
84         mutex_lock(&dev->struct_mutex);
85         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
86         mutex_unlock(&dev->struct_mutex);
87
88         return ret;
89 }
90
91 int
92 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
93                             struct drm_file *file_priv)
94 {
95         struct drm_i915_gem_get_aperture *args = data;
96
97         if (!(dev->driver->driver_features & DRIVER_GEM))
98                 return -ENODEV;
99
100         args->aper_size = dev->gtt_total;
101         args->aper_available_size = (args->aper_size -
102                                      atomic_read(&dev->pin_memory));
103
104         return 0;
105 }
106
107
108 /**
109  * Creates a new mm object and returns a handle to it.
110  */
111 int
112 i915_gem_create_ioctl(struct drm_device *dev, void *data,
113                       struct drm_file *file_priv)
114 {
115         struct drm_i915_gem_create *args = data;
116         struct drm_gem_object *obj;
117         int handle, ret;
118
119         args->size = roundup(args->size, PAGE_SIZE);
120
121         /* Allocate the new object */
122         obj = drm_gem_object_alloc(dev, args->size);
123         if (obj == NULL)
124                 return -ENOMEM;
125
126         ret = drm_gem_handle_create(file_priv, obj, &handle);
127         mutex_lock(&dev->struct_mutex);
128         drm_gem_object_handle_unreference(obj);
129         mutex_unlock(&dev->struct_mutex);
130
131         if (ret)
132                 return ret;
133
134         args->handle = handle;
135
136         return 0;
137 }
138
139 /**
140  * Reads data from the object referenced by handle.
141  *
142  * On error, the contents of *data are undefined.
143  */
144 int
145 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
146                      struct drm_file *file_priv)
147 {
148         struct drm_i915_gem_pread *args = data;
149         struct drm_gem_object *obj;
150         struct drm_i915_gem_object *obj_priv;
151         ssize_t read;
152         loff_t offset;
153         int ret;
154
155         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
156         if (obj == NULL)
157                 return -EBADF;
158         obj_priv = obj->driver_private;
159
160         /* Bounds check source.
161          *
162          * XXX: This could use review for overflow issues...
163          */
164         if (args->offset > obj->size || args->size > obj->size ||
165             args->offset + args->size > obj->size) {
166                 drm_gem_object_unreference(obj);
167                 return -EINVAL;
168         }
169
170         mutex_lock(&dev->struct_mutex);
171
172         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
173                                                         args->size);
174         if (ret != 0) {
175                 drm_gem_object_unreference(obj);
176                 mutex_unlock(&dev->struct_mutex);
177                 return ret;
178         }
179
180         offset = args->offset;
181
182         read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
183                         args->size, &offset);
184         if (read != args->size) {
185                 drm_gem_object_unreference(obj);
186                 mutex_unlock(&dev->struct_mutex);
187                 if (read < 0)
188                         return read;
189                 else
190                         return -EINVAL;
191         }
192
193         drm_gem_object_unreference(obj);
194         mutex_unlock(&dev->struct_mutex);
195
196         return 0;
197 }
198
199 /* This is the fast write path which cannot handle
200  * page faults in the source data
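 * (the copy runs under an atomic io_mapping and so must not sleep;
 * i915_gem_gtt_pwrite() falls back to slow_user_write() on -EFAULT)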
201  */
202
203 static inline int
204 fast_user_write(struct io_mapping *mapping,
205                 loff_t page_base, int page_offset,
206                 char __user *user_data,
207                 int length)
208 {
209         char *vaddr_atomic;
210         unsigned long unwritten;
211
212         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
213         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
214                                                       user_data, length);
215         io_mapping_unmap_atomic(vaddr_atomic);
216         if (unwritten)
217                 return -EFAULT;
218         return 0;
219 }
220
221 /* Here's the write path which can sleep for
222  * page faults
223  */
224
225 static inline int
226 slow_user_write(struct io_mapping *mapping,
227                 loff_t page_base, int page_offset,
228                 char __user *user_data,
229                 int length)
230 {
231         char __iomem *vaddr;
232         unsigned long unwritten;
233
234         vaddr = io_mapping_map_wc(mapping, page_base);
235         if (vaddr == NULL)
236                 return -EFAULT;
237         unwritten = __copy_from_user(vaddr + page_offset,
238                                      user_data, length);
239         io_mapping_unmap(vaddr);
240         if (unwritten)
241                 return -EFAULT;
242         return 0;
243 }
244
245 static int
246 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
247                     struct drm_i915_gem_pwrite *args,
248                     struct drm_file *file_priv)
249 {
250         struct drm_i915_gem_object *obj_priv = obj->driver_private;
251         drm_i915_private_t *dev_priv = dev->dev_private;
252         ssize_t remain;
253         loff_t offset, page_base;
254         char __user *user_data;
255         int page_offset, page_length;
256         int ret;
257
258         user_data = (char __user *) (uintptr_t) args->data_ptr;
259         remain = args->size;
260         if (!access_ok(VERIFY_READ, user_data, remain))
261                 return -EFAULT;
262
263
264         mutex_lock(&dev->struct_mutex);
265         ret = i915_gem_object_pin(obj, 0);
266         if (ret) {
267                 mutex_unlock(&dev->struct_mutex);
268                 return ret;
269         }
270         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
271         if (ret)
272                 goto fail;
273
274         obj_priv = obj->driver_private;
275         offset = obj_priv->gtt_offset + args->offset;
276         obj_priv->dirty = 1;
277
278         while (remain > 0) {
279                 /* Operation in this page
280                  *
281                  * page_base = page offset within aperture
282                  * page_offset = offset within page
283                  * page_length = bytes to copy for this page
284                  */
285                 page_base = (offset & ~(PAGE_SIZE-1));
286                 page_offset = offset & (PAGE_SIZE-1);
287                 page_length = remain;
288                 if ((page_offset + remain) > PAGE_SIZE)
289                         page_length = PAGE_SIZE - page_offset;
290
291                 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
292                                        page_offset, user_data, page_length);
293
294                 /* If we get a fault while copying data, then (presumably) our
295                  * source page isn't available. In this case, use the
296                  * non-atomic function
297                  */
298                 if (ret) {
299                         ret = slow_user_write (dev_priv->mm.gtt_mapping,
300                                                page_base, page_offset,
301                                                user_data, page_length);
302                         if (ret)
303                                 goto fail;
304                 }
305
306                 remain -= page_length;
307                 user_data += page_length;
308                 offset += page_length;
309         }
310
311 fail:
312         i915_gem_object_unpin(obj);
313         mutex_unlock(&dev->struct_mutex);
314
315         return ret;
316 }
317
318 static int
319 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
320                       struct drm_i915_gem_pwrite *args,
321                       struct drm_file *file_priv)
322 {
323         int ret;
324         loff_t offset;
325         ssize_t written;
326
327         mutex_lock(&dev->struct_mutex);
328
329         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
330         if (ret) {
331                 mutex_unlock(&dev->struct_mutex);
332                 return ret;
333         }
334
335         offset = args->offset;
336
337         written = vfs_write(obj->filp,
338                             (char __user *)(uintptr_t) args->data_ptr,
339                             args->size, &offset);
340         if (written != args->size) {
341                 mutex_unlock(&dev->struct_mutex);
342                 if (written < 0)
343                         return written;
344                 else
345                         return -EINVAL;
346         }
347
348         mutex_unlock(&dev->struct_mutex);
349
350         return 0;
351 }
352
353 /**
354  * Writes data to the object referenced by handle.
355  *
356  * On error, the contents of the buffer that were to be modified are undefined.
357  */
358 int
359 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
360                       struct drm_file *file_priv)
361 {
362         struct drm_i915_gem_pwrite *args = data;
363         struct drm_gem_object *obj;
364         struct drm_i915_gem_object *obj_priv;
365         int ret = 0;
366
367         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
368         if (obj == NULL)
369                 return -EBADF;
370         obj_priv = obj->driver_private;
371
372         /* Bounds check destination.
373          *
374          * XXX: This could use review for overflow issues...
375          */
376         if (args->offset > obj->size || args->size > obj->size ||
377             args->offset + args->size > obj->size) {
378                 drm_gem_object_unreference(obj);
379                 return -EINVAL;
380         }
381
382         /* We can only do the GTT pwrite on untiled buffers, as otherwise
383          * it would end up going through the fenced access, and we'll get
384          * different detiling behavior between reading and writing.
385          * pread/pwrite currently are reading and writing from the CPU
386          * perspective, requiring manual detiling by the client.
387          */
388         if (obj_priv->phys_obj)
389                 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
390         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
391                  dev->gtt_total != 0)
392                 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
393         else
394                 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
395
396 #if WATCH_PWRITE
397         if (ret)
398                 DRM_INFO("pwrite failed %d\n", ret);
399 #endif
400
401         drm_gem_object_unreference(obj);
402
403         return ret;
404 }
405
406 /**
407  * Called when user space prepares to use an object with the CPU, either
408  * through the mmap ioctl's mapping or a GTT mapping.
409  */
410 int
411 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
412                           struct drm_file *file_priv)
413 {
414         struct drm_i915_gem_set_domain *args = data;
415         struct drm_gem_object *obj;
416         uint32_t read_domains = args->read_domains;
417         uint32_t write_domain = args->write_domain;
418         int ret;
419
420         if (!(dev->driver->driver_features & DRIVER_GEM))
421                 return -ENODEV;
422
423         /* Only handle setting domains to types used by the CPU. */
424         if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
425                 return -EINVAL;
426
427         if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
428                 return -EINVAL;
429
430         /* Having something in the write domain implies it's in the read
431          * domain, and only that read domain.  Enforce that in the request.
432          */
433         if (write_domain != 0 && read_domains != write_domain)
434                 return -EINVAL;
435
436         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
437         if (obj == NULL)
438                 return -EBADF;
439
440         mutex_lock(&dev->struct_mutex);
441 #if WATCH_BUF
442         DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
443                  obj, obj->size, read_domains, write_domain);
444 #endif
445         if (read_domains & I915_GEM_DOMAIN_GTT) {
446                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
447
448                 /* Silently promote "you're not bound, there was nothing to do"
449                  * to success, since the client was just asking us to
450                  * make sure everything was done.
451                  */
452                 if (ret == -EINVAL)
453                         ret = 0;
454         } else {
455                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
456         }
457
458         drm_gem_object_unreference(obj);
459         mutex_unlock(&dev->struct_mutex);
460         return ret;
461 }
462
463 /**
464  * Called when user space has done writes to this buffer
465  */
466 int
467 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
468                       struct drm_file *file_priv)
469 {
470         struct drm_i915_gem_sw_finish *args = data;
471         struct drm_gem_object *obj;
472         struct drm_i915_gem_object *obj_priv;
473         int ret = 0;
474
475         if (!(dev->driver->driver_features & DRIVER_GEM))
476                 return -ENODEV;
477
478         mutex_lock(&dev->struct_mutex);
479         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
480         if (obj == NULL) {
481                 mutex_unlock(&dev->struct_mutex);
482                 return -EBADF;
483         }
484
485 #if WATCH_BUF
486         DRM_INFO("%s: sw_finish %d (%p %d)\n",
487                  __func__, args->handle, obj, obj->size);
488 #endif
489         obj_priv = obj->driver_private;
490
491         /* Pinned buffers may be scanout, so flush the cache */
492         if (obj_priv->pin_count)
493                 i915_gem_object_flush_cpu_write_domain(obj);
494
495         drm_gem_object_unreference(obj);
496         mutex_unlock(&dev->struct_mutex);
497         return ret;
498 }
499
500 /**
501  * Maps the contents of an object, returning the address it is mapped
502  * into.
503  *
504  * While the mapping holds a reference on the contents of the object, it doesn't
505  * imply a ref on the object itself.
506  */
507 int
508 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
509                    struct drm_file *file_priv)
510 {
511         struct drm_i915_gem_mmap *args = data;
512         struct drm_gem_object *obj;
513         loff_t offset;
514         unsigned long addr;
515
516         if (!(dev->driver->driver_features & DRIVER_GEM))
517                 return -ENODEV;
518
519         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
520         if (obj == NULL)
521                 return -EBADF;
522
523         offset = args->offset;
524
525         down_write(&current->mm->mmap_sem);
526         addr = do_mmap(obj->filp, 0, args->size,
527                        PROT_READ | PROT_WRITE, MAP_SHARED,
528                        args->offset);
529         up_write(&current->mm->mmap_sem);
530         mutex_lock(&dev->struct_mutex);
531         drm_gem_object_unreference(obj);
532         mutex_unlock(&dev->struct_mutex);
533         if (IS_ERR((void *)addr))
534                 return addr;
535
536         args->addr_ptr = (uint64_t) addr;
537
538         return 0;
539 }
540
541 /**
542  * i915_gem_fault - fault a page into the GTT
543  * @vma: VMA in question
544  * @vmf: fault info
545  *
546  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
547  * from userspace.  The fault handler takes care of binding the object to
548  * the GTT (if needed), allocating and programming a fence register (again,
549  * only if needed based on whether the old reg is still valid or the object
550  * is tiled) and inserting a new PTE into the faulting process.
551  *
552  * Note that the faulting process may involve evicting existing objects
553  * from the GTT and/or fence registers to make room.  So performance may
554  * suffer if the GTT working set is large or there are few fence registers
555  * left.
556  */
557 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
558 {
559         struct drm_gem_object *obj = vma->vm_private_data;
560         struct drm_device *dev = obj->dev;
561         struct drm_i915_private *dev_priv = dev->dev_private;
562         struct drm_i915_gem_object *obj_priv = obj->driver_private;
563         pgoff_t page_offset;
564         unsigned long pfn;
565         int ret = 0;
566         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
567
568         /* We don't use vmf->pgoff since that has the fake offset */
569         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
570                 PAGE_SHIFT;
571
572         /* Now bind it into the GTT if needed */
573         mutex_lock(&dev->struct_mutex);
574         if (!obj_priv->gtt_space) {
575                 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
576                 if (ret) {
577                         mutex_unlock(&dev->struct_mutex);
578                         return VM_FAULT_SIGBUS;
579                 }
580                 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
581         }
582
583         /* Need a new fence register? */
584         if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
585             obj_priv->tiling_mode != I915_TILING_NONE) {
586                 ret = i915_gem_object_get_fence_reg(obj, write);
587                 if (ret) {
588                         mutex_unlock(&dev->struct_mutex);
589                         return VM_FAULT_SIGBUS;
590                 }
591         }
592
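        /* The aperture is mapped linearly starting at dev->agp->base, so the
         * CPU-visible pfn is simply the object's GTT offset (plus the page
         * offset within the object) added to that base.
         */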
593         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
594                 page_offset;
595
596         /* Finally, remap it using the new GTT offset */
597         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
598
599         mutex_unlock(&dev->struct_mutex);
600
601         switch (ret) {
602         case -ENOMEM:
603         case -EAGAIN:
604                 return VM_FAULT_OOM;
605         case -EFAULT:
606                 return VM_FAULT_SIGBUS;
607         default:
608                 return VM_FAULT_NOPAGE;
609         }
610 }
611
612 /**
613  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
614  * @obj: obj in question
615  *
616  * GEM memory mapping works by handing back to userspace a fake mmap offset
617  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
618  * up the object based on the offset and sets up the various memory mapping
619  * structures.
620  *
621  * This routine allocates and attaches a fake offset for @obj.
622  */
623 static int
624 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
625 {
626         struct drm_device *dev = obj->dev;
627         struct drm_gem_mm *mm = dev->mm_private;
628         struct drm_i915_gem_object *obj_priv = obj->driver_private;
629         struct drm_map_list *list;
630         struct drm_map *map;
631         int ret = 0;
632
633         /* Set the object up for mmap'ing */
634         list = &obj->map_list;
635         list->map = drm_calloc(1, sizeof(struct drm_map_list),
636                                DRM_MEM_DRIVER);
637         if (!list->map)
638                 return -ENOMEM;
639
640         map = list->map;
641         map->type = _DRM_GEM;
642         map->size = obj->size;
643         map->handle = obj;
644
645         /* Get a DRM GEM mmap offset allocated... */
646         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
647                                                     obj->size / PAGE_SIZE, 0, 0);
648         if (!list->file_offset_node) {
649                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
650                 ret = -ENOMEM;
651                 goto out_free_list;
652         }
653
654         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
655                                                   obj->size / PAGE_SIZE, 0);
656         if (!list->file_offset_node) {
657                 ret = -ENOMEM;
658                 goto out_free_list;
659         }
660
661         list->hash.key = list->file_offset_node->start;
662         if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
663                 DRM_ERROR("failed to add to map hash\n");
664                 goto out_free_mm;
665         }
666
667         /* By now we should be all set, any drm_mmap request on the offset
668          * below will get to our mmap & fault handler */
669         obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
670
671         return 0;
672
673 out_free_mm:
674         drm_mm_put_block(list->file_offset_node);
675 out_free_list:
676         drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
677
678         return ret;
679 }
680
681 static void
682 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
683 {
684         struct drm_device *dev = obj->dev;
685         struct drm_i915_gem_object *obj_priv = obj->driver_private;
686         struct drm_gem_mm *mm = dev->mm_private;
687         struct drm_map_list *list;
688
689         list = &obj->map_list;
690         drm_ht_remove_item(&mm->offset_hash, &list->hash);
691
692         if (list->file_offset_node) {
693                 drm_mm_put_block(list->file_offset_node);
694                 list->file_offset_node = NULL;
695         }
696
697         if (list->map) {
698                 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
699                 list->map = NULL;
700         }
701
702         obj_priv->mmap_offset = 0;
703 }
704
705 /**
706  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
707  * @obj: object to check
708  *
709  * Return the required GTT alignment for an object, taking into account
710  * potential fence register mapping if needed.
711  */
712 static uint32_t
713 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
714 {
715         struct drm_device *dev = obj->dev;
716         struct drm_i915_gem_object *obj_priv = obj->driver_private;
717         int start, i;
718
719         /*
720          * Minimum alignment is 4k (GTT page size), but might be greater
721          * if a fence register is needed for the object.
722          */
723         if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
724                 return 4096;
725
726         /*
727          * Previous chips need to be aligned to the size of the smallest
728          * fence register that can contain the object.
729          */
730         if (IS_I9XX(dev))
731                 start = 1024*1024;
732         else
733                 start = 512*1024;
734
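        /* Double i until it reaches the smallest power-of-two fence size that
         * can contain the object, e.g. a 3MB object on a 9xx chip ends up
         * needing 4MB alignment.
         */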
735         for (i = start; i < obj->size; i <<= 1)
736                 ;
737
738         return i;
739 }
740
741 /**
742  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
743  * @dev: DRM device
744  * @data: GTT mapping ioctl data
745  * @file_priv: GEM object info
746  *
747  * Simply returns the fake offset to userspace so it can mmap it.
748  * The mmap call will end up in drm_gem_mmap(), which will set things
749  * up so we can get faults in the handler above.
750  *
751  * The fault handler will take care of binding the object into the GTT
752  * (since it may have been evicted to make room for something), allocating
753  * a fence register, and mapping the appropriate aperture address into
754  * userspace.
755  */
756 int
757 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
758                         struct drm_file *file_priv)
759 {
760         struct drm_i915_gem_mmap_gtt *args = data;
761         struct drm_i915_private *dev_priv = dev->dev_private;
762         struct drm_gem_object *obj;
763         struct drm_i915_gem_object *obj_priv;
764         int ret;
765
766         if (!(dev->driver->driver_features & DRIVER_GEM))
767                 return -ENODEV;
768
769         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
770         if (obj == NULL)
771                 return -EBADF;
772
773         mutex_lock(&dev->struct_mutex);
774
775         obj_priv = obj->driver_private;
776
777         if (!obj_priv->mmap_offset) {
778                 ret = i915_gem_create_mmap_offset(obj);
779                 if (ret) {
780                         drm_gem_object_unreference(obj);
781                         mutex_unlock(&dev->struct_mutex);
782                         return ret;
783                 }
784         }
785
786         args->offset = obj_priv->mmap_offset;
787
788         obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
789
790         /* Make sure the alignment is correct for fence regs etc */
791         if (obj_priv->agp_mem &&
792             (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
793                 drm_gem_object_unreference(obj);
794                 mutex_unlock(&dev->struct_mutex);
795                 return -EINVAL;
796         }
797
798         /*
799          * Pull it into the GTT so that we have a page list (makes the
800          * initial fault faster and any subsequent flushing possible).
801          */
802         if (!obj_priv->agp_mem) {
803                 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
804                 if (ret) {
805                         drm_gem_object_unreference(obj);
806                         mutex_unlock(&dev->struct_mutex);
807                         return ret;
808                 }
809                 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
810         }
811
812         drm_gem_object_unreference(obj);
813         mutex_unlock(&dev->struct_mutex);
814
815         return 0;
816 }
817
818 static void
819 i915_gem_object_free_page_list(struct drm_gem_object *obj)
820 {
821         struct drm_i915_gem_object *obj_priv = obj->driver_private;
822         int page_count = obj->size / PAGE_SIZE;
823         int i;
824
825         if (obj_priv->page_list == NULL)
826                 return;
827
828
829         for (i = 0; i < page_count; i++)
830                 if (obj_priv->page_list[i] != NULL) {
831                         if (obj_priv->dirty)
832                                 set_page_dirty(obj_priv->page_list[i]);
833                         mark_page_accessed(obj_priv->page_list[i]);
834                         page_cache_release(obj_priv->page_list[i]);
835                 }
836         obj_priv->dirty = 0;
837
838         drm_free(obj_priv->page_list,
839                  page_count * sizeof(struct page *),
840                  DRM_MEM_DRIVER);
841         obj_priv->page_list = NULL;
842 }
843
844 static void
845 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
846 {
847         struct drm_device *dev = obj->dev;
848         drm_i915_private_t *dev_priv = dev->dev_private;
849         struct drm_i915_gem_object *obj_priv = obj->driver_private;
850
851         /* Add a reference if we're newly entering the active list. */
852         if (!obj_priv->active) {
853                 drm_gem_object_reference(obj);
854                 obj_priv->active = 1;
855         }
856         /* Move from whatever list we were on to the tail of execution. */
857         list_move_tail(&obj_priv->list,
858                        &dev_priv->mm.active_list);
859         obj_priv->last_rendering_seqno = seqno;
860 }
861
862 static void
863 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
864 {
865         struct drm_device *dev = obj->dev;
866         drm_i915_private_t *dev_priv = dev->dev_private;
867         struct drm_i915_gem_object *obj_priv = obj->driver_private;
868
869         BUG_ON(!obj_priv->active);
870         list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
871         obj_priv->last_rendering_seqno = 0;
872 }
873
874 static void
875 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
876 {
877         struct drm_device *dev = obj->dev;
878         drm_i915_private_t *dev_priv = dev->dev_private;
879         struct drm_i915_gem_object *obj_priv = obj->driver_private;
880
881         i915_verify_inactive(dev, __FILE__, __LINE__);
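        /* Pinned buffers are kept off the inactive LRU entirely; everything
         * else goes to the tail of the inactive list.
         */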
882         if (obj_priv->pin_count != 0)
883                 list_del_init(&obj_priv->list);
884         else
885                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
886
887         obj_priv->last_rendering_seqno = 0;
888         if (obj_priv->active) {
889                 obj_priv->active = 0;
890                 drm_gem_object_unreference(obj);
891         }
892         i915_verify_inactive(dev, __FILE__, __LINE__);
893 }
894
895 /**
896  * Creates a new sequence number, emitting a write of it to the status page
897  * plus an interrupt, which will trigger i915_user_interrupt_handler.
898  *
899  * Must be called with struct_lock held.
900  *
901  * Returned sequence numbers are nonzero on success.
902  */
903 static uint32_t
904 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
905 {
906         drm_i915_private_t *dev_priv = dev->dev_private;
907         struct drm_i915_gem_request *request;
908         uint32_t seqno;
909         int was_empty;
910         RING_LOCALS;
911
912         request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
913         if (request == NULL)
914                 return 0;
915
916         /* Grab the seqno we're going to make this request be, and bump the
917          * next (skipping 0 so it can be the reserved no-seqno value).
918          */
919         seqno = dev_priv->mm.next_gem_seqno;
920         dev_priv->mm.next_gem_seqno++;
921         if (dev_priv->mm.next_gem_seqno == 0)
922                 dev_priv->mm.next_gem_seqno++;
923
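        /* Write the seqno into the I915_GEM_HWS_INDEX slot of the hardware
         * status page and follow it with a user interrupt, so that
         * i915_wait_request() can read it back via i915_get_gem_seqno() and
         * be woken when it lands.
         */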
924         BEGIN_LP_RING(4);
925         OUT_RING(MI_STORE_DWORD_INDEX);
926         OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
927         OUT_RING(seqno);
928
929         OUT_RING(MI_USER_INTERRUPT);
930         ADVANCE_LP_RING();
931
932         DRM_DEBUG("%d\n", seqno);
933
934         request->seqno = seqno;
935         request->emitted_jiffies = jiffies;
936         was_empty = list_empty(&dev_priv->mm.request_list);
937         list_add_tail(&request->list, &dev_priv->mm.request_list);
938
939         /* Associate any objects on the flushing list matching the write
940          * domain we're flushing with our flush.
941          */
942         if (flush_domains != 0) {
943                 struct drm_i915_gem_object *obj_priv, *next;
944
945                 list_for_each_entry_safe(obj_priv, next,
946                                          &dev_priv->mm.flushing_list, list) {
947                         struct drm_gem_object *obj = obj_priv->obj;
948
949                         if ((obj->write_domain & flush_domains) ==
950                             obj->write_domain) {
951                                 obj->write_domain = 0;
952                                 i915_gem_object_move_to_active(obj, seqno);
953                         }
954                 }
955
956         }
957
958         if (was_empty && !dev_priv->mm.suspended)
959                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
960         return seqno;
961 }
962
963 /**
964  * Command execution barrier
965  *
966  * Ensures that all commands in the ring are finished
967  * before signalling the CPU
968  */
969 static uint32_t
970 i915_retire_commands(struct drm_device *dev)
971 {
972         drm_i915_private_t *dev_priv = dev->dev_private;
973         uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
974         uint32_t flush_domains = 0;
975         RING_LOCALS;
976
977         /* The sampler always gets flushed on i965 (sigh) */
978         if (IS_I965G(dev))
979                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
980         BEGIN_LP_RING(2);
981         OUT_RING(cmd);
982         OUT_RING(0); /* noop */
983         ADVANCE_LP_RING();
984         return flush_domains;
985 }
986
987 /**
988  * Moves buffers associated only with the given active seqno from the active
989  * to inactive list, potentially freeing them.
990  */
991 static void
992 i915_gem_retire_request(struct drm_device *dev,
993                         struct drm_i915_gem_request *request)
994 {
995         drm_i915_private_t *dev_priv = dev->dev_private;
996
997         /* Move any buffers on the active list that are no longer referenced
998          * by the ringbuffer to the flushing/inactive lists as appropriate.
999          */
1000         while (!list_empty(&dev_priv->mm.active_list)) {
1001                 struct drm_gem_object *obj;
1002                 struct drm_i915_gem_object *obj_priv;
1003
1004                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1005                                             struct drm_i915_gem_object,
1006                                             list);
1007                 obj = obj_priv->obj;
1008
1009                 /* If the seqno being retired doesn't match the oldest in the
1010                  * list, then the oldest in the list must still be newer than
1011                  * this seqno.
1012                  */
1013                 if (obj_priv->last_rendering_seqno != request->seqno)
1014                         return;
1015
1016 #if WATCH_LRU
1017                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1018                          __func__, request->seqno, obj);
1019 #endif
1020
1021                 if (obj->write_domain != 0)
1022                         i915_gem_object_move_to_flushing(obj);
1023                 else
1024                         i915_gem_object_move_to_inactive(obj);
1025         }
1026 }
1027
1028 /**
1029  * Returns true if seq1 is later than seq2.
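 *
 * The comparison is done on the signed 32-bit difference so it stays correct
 * across seqno wraparound: e.g. with seq1 = 2 and seq2 = 0xfffffffe,
 * (int32_t)(seq1 - seq2) is 4, so seq1 is still treated as the later one.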
1030  */
1031 static int
1032 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1033 {
1034         return (int32_t)(seq1 - seq2) >= 0;
1035 }
1036
1037 uint32_t
1038 i915_get_gem_seqno(struct drm_device *dev)
1039 {
1040         drm_i915_private_t *dev_priv = dev->dev_private;
1041
1042         return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1043 }
1044
1045 /**
1046  * This function clears the request list as sequence numbers are passed.
1047  */
1048 void
1049 i915_gem_retire_requests(struct drm_device *dev)
1050 {
1051         drm_i915_private_t *dev_priv = dev->dev_private;
1052         uint32_t seqno;
1053
1054         if (!dev_priv->hw_status_page)
1055                 return;
1056
1057         seqno = i915_get_gem_seqno(dev);
1058
1059         while (!list_empty(&dev_priv->mm.request_list)) {
1060                 struct drm_i915_gem_request *request;
1061                 uint32_t retiring_seqno;
1062
1063                 request = list_first_entry(&dev_priv->mm.request_list,
1064                                            struct drm_i915_gem_request,
1065                                            list);
1066                 retiring_seqno = request->seqno;
1067
1068                 if (i915_seqno_passed(seqno, retiring_seqno) ||
1069                     dev_priv->mm.wedged) {
1070                         i915_gem_retire_request(dev, request);
1071
1072                         list_del(&request->list);
1073                         drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1074                 } else
1075                         break;
1076         }
1077 }
1078
1079 void
1080 i915_gem_retire_work_handler(struct work_struct *work)
1081 {
1082         drm_i915_private_t *dev_priv;
1083         struct drm_device *dev;
1084
1085         dev_priv = container_of(work, drm_i915_private_t,
1086                                 mm.retire_work.work);
1087         dev = dev_priv->dev;
1088
1089         mutex_lock(&dev->struct_mutex);
1090         i915_gem_retire_requests(dev);
1091         if (!dev_priv->mm.suspended &&
1092             !list_empty(&dev_priv->mm.request_list))
1093                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1094         mutex_unlock(&dev->struct_mutex);
1095 }
1096
1097 /**
1098  * Waits for a sequence number to be signaled, and cleans up the
1099  * request and object lists appropriately for that event.
1100  */
1101 static int
1102 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1103 {
1104         drm_i915_private_t *dev_priv = dev->dev_private;
1105         int ret = 0;
1106
1107         BUG_ON(seqno == 0);
1108
1109         if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1110                 dev_priv->mm.waiting_gem_seqno = seqno;
1111                 i915_user_irq_get(dev);
1112                 ret = wait_event_interruptible(dev_priv->irq_queue,
1113                                                i915_seqno_passed(i915_get_gem_seqno(dev),
1114                                                                  seqno) ||
1115                                                dev_priv->mm.wedged);
1116                 i915_user_irq_put(dev);
1117                 dev_priv->mm.waiting_gem_seqno = 0;
1118         }
1119         if (dev_priv->mm.wedged)
1120                 ret = -EIO;
1121
1122         if (ret && ret != -ERESTARTSYS)
1123                 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1124                           __func__, ret, seqno, i915_get_gem_seqno(dev));
1125
1126         /* Directly dispatch request retiring.  While we have the work queue
1127          * to handle this, the waiter on a request often wants an associated
1128          * buffer to have made it to the inactive list, and we would need
1129          * a separate wait queue to handle that.
1130          */
1131         if (ret == 0)
1132                 i915_gem_retire_requests(dev);
1133
1134         return ret;
1135 }
1136
1137 static void
1138 i915_gem_flush(struct drm_device *dev,
1139                uint32_t invalidate_domains,
1140                uint32_t flush_domains)
1141 {
1142         drm_i915_private_t *dev_priv = dev->dev_private;
1143         uint32_t cmd;
1144         RING_LOCALS;
1145
1146 #if WATCH_EXEC
1147         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1148                   invalidate_domains, flush_domains);
1149 #endif
1150
1151         if (flush_domains & I915_GEM_DOMAIN_CPU)
1152                 drm_agp_chipset_flush(dev);
1153
1154         if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
1155                                                      I915_GEM_DOMAIN_GTT)) {
1156                 /*
1157                  * read/write caches:
1158                  *
1159                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1160                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
1161                  * also flushed at 2d versus 3d pipeline switches.
1162                  *
1163                  * read-only caches:
1164                  *
1165                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1166                  * MI_READ_FLUSH is set, and is always flushed on 965.
1167                  *
1168                  * I915_GEM_DOMAIN_COMMAND may not exist?
1169                  *
1170                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1171                  * invalidated when MI_EXE_FLUSH is set.
1172                  *
1173                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1174                  * invalidated with every MI_FLUSH.
1175                  *
1176                  * TLBs:
1177                  *
1178                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1179                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
1180                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1181                  * are flushed at any MI_FLUSH.
1182                  */
1183
1184                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1185                 if ((invalidate_domains|flush_domains) &
1186                     I915_GEM_DOMAIN_RENDER)
1187                         cmd &= ~MI_NO_WRITE_FLUSH;
1188                 if (!IS_I965G(dev)) {
1189                         /*
1190                          * On the 965, the sampler cache always gets flushed
1191                          * and this bit is reserved.
1192                          */
1193                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1194                                 cmd |= MI_READ_FLUSH;
1195                 }
1196                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1197                         cmd |= MI_EXE_FLUSH;
1198
1199 #if WATCH_EXEC
1200                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1201 #endif
1202                 BEGIN_LP_RING(2);
1203                 OUT_RING(cmd);
1204                 OUT_RING(0); /* noop */
1205                 ADVANCE_LP_RING();
1206         }
1207 }
1208
1209 /**
1210  * Ensures that all rendering to the object has completed and the object is
1211  * safe to unbind from the GTT or access from the CPU.
1212  */
1213 static int
1214 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1215 {
1216         struct drm_device *dev = obj->dev;
1217         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1218         int ret;
1219
1220         /* This function only exists to support waiting for existing rendering,
1221          * not for emitting required flushes.
1222          */
1223         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1224
1225         /* If there is rendering queued on the buffer being evicted, wait for
1226          * it.
1227          */
1228         if (obj_priv->active) {
1229 #if WATCH_BUF
1230                 DRM_INFO("%s: object %p wait for seqno %08x\n",
1231                           __func__, obj, obj_priv->last_rendering_seqno);
1232 #endif
1233                 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1234                 if (ret != 0)
1235                         return ret;
1236         }
1237
1238         return 0;
1239 }
1240
1241 /**
1242  * Unbinds an object from the GTT aperture.
1243  */
1244 int
1245 i915_gem_object_unbind(struct drm_gem_object *obj)
1246 {
1247         struct drm_device *dev = obj->dev;
1248         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1249         loff_t offset;
1250         int ret = 0;
1251
1252 #if WATCH_BUF
1253         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1254         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1255 #endif
1256         if (obj_priv->gtt_space == NULL)
1257                 return 0;
1258
1259         if (obj_priv->pin_count != 0) {
1260                 DRM_ERROR("Attempting to unbind pinned buffer\n");
1261                 return -EINVAL;
1262         }
1263
1264         /* Move the object to the CPU domain to ensure that
1265          * any possible CPU writes while it's not in the GTT
1266          * are flushed when we go to remap it. This will
1267          * also ensure that all pending GPU writes are finished
1268          * before we unbind.
1269          */
1270         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1271         if (ret) {
1272                 if (ret != -ERESTARTSYS)
1273                         DRM_ERROR("set_domain failed: %d\n", ret);
1274                 return ret;
1275         }
1276
1277         if (obj_priv->agp_mem != NULL) {
1278                 drm_unbind_agp(obj_priv->agp_mem);
1279                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1280                 obj_priv->agp_mem = NULL;
1281         }
1282
1283         BUG_ON(obj_priv->active);
1284
1285         /* blow away mappings if mapped through GTT */
1286         offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
1287         if (dev->dev_mapping)
1288                 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
1289
1290         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1291                 i915_gem_clear_fence_reg(obj);
1292
1293         i915_gem_object_free_page_list(obj);
1294
1295         if (obj_priv->gtt_space) {
1296                 atomic_dec(&dev->gtt_count);
1297                 atomic_sub(obj->size, &dev->gtt_memory);
1298
1299                 drm_mm_put_block(obj_priv->gtt_space);
1300                 obj_priv->gtt_space = NULL;
1301         }
1302
1303         /* Remove ourselves from the LRU list if present. */
1304         if (!list_empty(&obj_priv->list))
1305                 list_del_init(&obj_priv->list);
1306
1307         return 0;
1308 }
1309
1310 static int
1311 i915_gem_evict_something(struct drm_device *dev)
1312 {
1313         drm_i915_private_t *dev_priv = dev->dev_private;
1314         struct drm_gem_object *obj;
1315         struct drm_i915_gem_object *obj_priv;
1316         int ret = 0;
1317
1318         for (;;) {
1319                 /* If there's an inactive buffer available now, grab it
1320                  * and be done.
1321                  */
1322                 if (!list_empty(&dev_priv->mm.inactive_list)) {
1323                         obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1324                                                     struct drm_i915_gem_object,
1325                                                     list);
1326                         obj = obj_priv->obj;
1327                         BUG_ON(obj_priv->pin_count != 0);
1328 #if WATCH_LRU
1329                         DRM_INFO("%s: evicting %p\n", __func__, obj);
1330 #endif
1331                         BUG_ON(obj_priv->active);
1332
1333                         /* Wait on the rendering and unbind the buffer. */
1334                         ret = i915_gem_object_unbind(obj);
1335                         break;
1336                 }
1337
1338                 /* If we didn't get anything, but the ring is still processing
1339                  * things, wait for one of those things to finish and hopefully
1340                  * leave us a buffer to evict.
1341                  */
1342                 if (!list_empty(&dev_priv->mm.request_list)) {
1343                         struct drm_i915_gem_request *request;
1344
1345                         request = list_first_entry(&dev_priv->mm.request_list,
1346                                                    struct drm_i915_gem_request,
1347                                                    list);
1348
1349                         ret = i915_wait_request(dev, request->seqno);
1350                         if (ret)
1351                                 break;
1352
1353                         /* if waiting caused an object to become inactive,
1354                          * then loop around and evict it. Otherwise, we
1355                          * assume that waiting freed and unbound something,
1356                          * so there should now be some space in the GTT
1357                          */
1358                         if (!list_empty(&dev_priv->mm.inactive_list))
1359                                 continue;
1360                         break;
1361                 }
1362
1363                 /* If we didn't have anything on the request list but there
1364                  * are buffers awaiting a flush, emit one and try again.
1365                  * When we wait on it, those buffers waiting for that flush
1366                  * will get moved to inactive.
1367                  */
1368                 if (!list_empty(&dev_priv->mm.flushing_list)) {
1369                         obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1370                                                     struct drm_i915_gem_object,
1371                                                     list);
1372                         obj = obj_priv->obj;
1373
1374                         i915_gem_flush(dev,
1375                                        obj->write_domain,
1376                                        obj->write_domain);
1377                         i915_add_request(dev, obj->write_domain);
1378
1379                         obj = NULL;
1380                         continue;
1381                 }
1382
1383                 DRM_ERROR("inactive empty %d request empty %d "
1384                           "flushing empty %d\n",
1385                           list_empty(&dev_priv->mm.inactive_list),
1386                           list_empty(&dev_priv->mm.request_list),
1387                           list_empty(&dev_priv->mm.flushing_list));
1388                 /* If we didn't do any of the above, there's nothing to be done
1389                  * and we just can't fit it in.
1390                  */
1391                 return -ENOMEM;
1392         }
1393         return ret;
1394 }
1395
1396 static int
1397 i915_gem_evict_everything(struct drm_device *dev)
1398 {
1399         int ret;
1400
1401         for (;;) {
1402                 ret = i915_gem_evict_something(dev);
1403                 if (ret != 0)
1404                         break;
1405         }
1406         if (ret == -ENOMEM)
1407                 return 0;
1408         return ret;
1409 }
1410
1411 static int
1412 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1413 {
1414         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1415         int page_count, i;
1416         struct address_space *mapping;
1417         struct inode *inode;
1418         struct page *page;
1419         int ret;
1420
1421         if (obj_priv->page_list)
1422                 return 0;
1423
1424         /* Get the list of pages out of our struct file.  They'll be pinned
1425          * at this point until we release them.
1426          */
1427         page_count = obj->size / PAGE_SIZE;
1428         BUG_ON(obj_priv->page_list != NULL);
1429         obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1430                                          DRM_MEM_DRIVER);
1431         if (obj_priv->page_list == NULL) {
1432                 DRM_ERROR("Failed to allocate page list\n");
1433                 return -ENOMEM;
1434         }
1435
1436         inode = obj->filp->f_path.dentry->d_inode;
1437         mapping = inode->i_mapping;
1438         for (i = 0; i < page_count; i++) {
1439                 page = read_mapping_page(mapping, i, NULL);
1440                 if (IS_ERR(page)) {
1441                         ret = PTR_ERR(page);
1442                         DRM_ERROR("read_mapping_page failed: %d\n", ret);
1443                         i915_gem_object_free_page_list(obj);
1444                         return ret;
1445                 }
1446                 obj_priv->page_list[i] = page;
1447         }
1448         return 0;
1449 }
1450
1451 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
1452 {
1453         struct drm_gem_object *obj = reg->obj;
1454         struct drm_device *dev = obj->dev;
1455         drm_i915_private_t *dev_priv = dev->dev_private;
1456         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1457         int regnum = obj_priv->fence_reg;
1458         uint64_t val;
1459
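        /* Pack the fence: the (page-aligned) end address of the region goes
         * in the upper 32 bits, the start address in the lower 32 bits,
         * along with the pitch in 128-byte units (minus one), the tiling
         * mode and the valid bit.
         */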
1460         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
1461                     0xfffff000) << 32;
1462         val |= obj_priv->gtt_offset & 0xfffff000;
1463         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1464         if (obj_priv->tiling_mode == I915_TILING_Y)
1465                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1466         val |= I965_FENCE_REG_VALID;
1467
1468         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
1469 }
1470
1471 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1472 {
1473         struct drm_gem_object *obj = reg->obj;
1474         struct drm_device *dev = obj->dev;
1475         drm_i915_private_t *dev_priv = dev->dev_private;
1476         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1477         int regnum = obj_priv->fence_reg;
1478         int tile_width;
1479         uint32_t val;
1480         uint32_t pitch_val;
1481
1482         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1483             (obj_priv->gtt_offset & (obj->size - 1))) {
1484                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
1485                      __func__, obj_priv->gtt_offset, obj->size);
1486                 return;
1487         }
1488
1489         if (obj_priv->tiling_mode == I915_TILING_Y &&
1490             HAS_128_BYTE_Y_TILING(dev))
1491                 tile_width = 128;
1492         else
1493                 tile_width = 512;
1494
1495         /* Note: pitch better be a power of two tile widths */
1496         pitch_val = obj_priv->stride / tile_width;
1497         pitch_val = ffs(pitch_val) - 1;
1498
1499         val = obj_priv->gtt_offset;
1500         if (obj_priv->tiling_mode == I915_TILING_Y)
1501                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1502         val |= I915_FENCE_SIZE_BITS(obj->size);
1503         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1504         val |= I830_FENCE_REG_VALID;
1505
1506         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
1507 }
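
/*
 * Worked example (illustrative only): with X tiling on a chip without
 * 128-byte Y tiles, tile_width is 512.  A hypothetical 2048-byte stride
 * gives pitch_val = 2048 / 512 = 4, and ffs(4) - 1 = 2, i.e. the register
 * field stores log2 of the pitch in tile widths.  That is why the note
 * above insists the pitch be a power of two tile widths: a stride of,
 * say, 3 tile widths would encode as ffs(3) - 1 = 0 and misprogram the
 * fence.
 */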
1508
1509 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1510 {
1511         struct drm_gem_object *obj = reg->obj;
1512         struct drm_device *dev = obj->dev;
1513         drm_i915_private_t *dev_priv = dev->dev_private;
1514         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1515         int regnum = obj_priv->fence_reg;
1516         uint32_t val;
1517         uint32_t pitch_val;
1518
1519         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1520             (obj_priv->gtt_offset & (obj->size - 1))) {
1521                 WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
1522                      __func__, obj_priv->gtt_offset);
1523                 return;
1524         }
1525
1526         pitch_val = (obj_priv->stride / 128) - 1;
1527
1528         val = obj_priv->gtt_offset;
1529         if (obj_priv->tiling_mode == I915_TILING_Y)
1530                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1531         val |= I830_FENCE_SIZE_BITS(obj->size);
1532         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1533         val |= I830_FENCE_REG_VALID;
1534
1535         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
1536
1537 }
1538
1539 /**
1540  * i915_gem_object_get_fence_reg - set up a fence reg for an object
1541  * @obj: object to map through a fence reg
1542  * @write: object is about to be written
1543  *
1544  * When mapping objects through the GTT, userspace wants to be able to write
1545  * to them without having to worry about swizzling if the object is tiled.
1546  *
1547  * This function walks the fence regs looking for a free one for @obj,
1548  * stealing one if it can't find any.
1549  *
1550  * It then sets up the reg based on the object's properties: address, pitch
1551  * and tiling format.
1552  */
1553 static int
1554 i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1555 {
1556         struct drm_device *dev = obj->dev;
1557         struct drm_i915_private *dev_priv = dev->dev_private;
1558         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1559         struct drm_i915_fence_reg *reg = NULL;
1560         struct drm_i915_gem_object *old_obj_priv = NULL;
1561         int i, ret, avail;
1562
1563         switch (obj_priv->tiling_mode) {
1564         case I915_TILING_NONE:
1565                 WARN(1, "allocating a fence for non-tiled object?\n");
1566                 break;
1567         case I915_TILING_X:
1568                 if (!obj_priv->stride)
1569                         return -EINVAL;
1570                 WARN((obj_priv->stride & (512 - 1)),
1571                      "object 0x%08x is X tiled but has non-512B pitch\n",
1572                      obj_priv->gtt_offset);
1573                 break;
1574         case I915_TILING_Y:
1575                 if (!obj_priv->stride)
1576                         return -EINVAL;
1577                 WARN((obj_priv->stride & (128 - 1)),
1578                      "object 0x%08x is Y tiled but has non-128B pitch\n",
1579                      obj_priv->gtt_offset);
1580                 break;
1581         }
1582
1583         /* First try to find a free reg */
1584 try_again:
1585         avail = 0;
1586         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
1587                 reg = &dev_priv->fence_regs[i];
1588                 if (!reg->obj)
1589                         break;
1590
1591                 old_obj_priv = reg->obj->driver_private;
1592                 if (!old_obj_priv->pin_count)
1593                     avail++;
1594         }
1595
1596         /* None available, try to steal one or wait for a user to finish */
1597         if (i == dev_priv->num_fence_regs) {
1598                 uint32_t seqno = dev_priv->mm.next_gem_seqno;
1599                 loff_t offset;
1600
1601                 if (avail == 0)
1602                         return -ENOMEM;
1603
1604                 for (i = dev_priv->fence_reg_start;
1605                      i < dev_priv->num_fence_regs; i++) {
1606                         uint32_t this_seqno;
1607
1608                         reg = &dev_priv->fence_regs[i];
1609                         old_obj_priv = reg->obj->driver_private;
1610
1611                         if (old_obj_priv->pin_count)
1612                                 continue;
1613
1614                         /* i915 uses fences for GPU access to tiled buffers */
1615                         if (IS_I965G(dev) || !old_obj_priv->active)
1616                                 break;
1617
1618                         /* find the seqno of the first available fence */
1619                         this_seqno = old_obj_priv->last_rendering_seqno;
1620                         if (this_seqno != 0 &&
1621                             reg->obj->write_domain == 0 &&
1622                             i915_seqno_passed(seqno, this_seqno))
1623                                 seqno = this_seqno;
1624                 }
1625
1626                 /*
1627                  * Now things get ugly... we have to wait for one of the
1628                  * objects to finish before trying again.
1629                  */
1630                 if (i == dev_priv->num_fence_regs) {
1631                         if (seqno == dev_priv->mm.next_gem_seqno) {
1632                                 i915_gem_flush(dev,
1633                                                I915_GEM_GPU_DOMAINS,
1634                                                I915_GEM_GPU_DOMAINS);
1635                                 seqno = i915_add_request(dev,
1636                                                          I915_GEM_GPU_DOMAINS);
1637                                 if (seqno == 0)
1638                                         return -ENOMEM;
1639                         }
1640
1641                         ret = i915_wait_request(dev, seqno);
1642                         if (ret)
1643                                 return ret;
1644                         goto try_again;
1645                 }
1646
1647                 BUG_ON(old_obj_priv->active ||
1648                        (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
1649
1650                 /*
1651                  * Zap this virtual mapping so we can set up a fence again
1652                  * for this object next time we need it.
1653                  */
1654                 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
1655                 if (dev->dev_mapping)
1656                         unmap_mapping_range(dev->dev_mapping, offset,
1657                                             reg->obj->size, 1);
1658                 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
1659         }
1660
1661         obj_priv->fence_reg = i;
1662         reg->obj = obj;
1663
1664         if (IS_I965G(dev))
1665                 i965_write_fence_reg(reg);
1666         else if (IS_I9XX(dev))
1667                 i915_write_fence_reg(reg);
1668         else
1669                 i830_write_fence_reg(reg);
1670
1671         return 0;
1672 }
1673
1674 /**
1675  * i915_gem_clear_fence_reg - clear out fence register info
1676  * @obj: object to clear
1677  *
1678  * Zeroes out the fence register itself and clears out the associated
1679  * data structures in dev_priv and obj_priv.
1680  */
1681 static void
1682 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
1683 {
1684         struct drm_device *dev = obj->dev;
1685         drm_i915_private_t *dev_priv = dev->dev_private;
1686         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1687
1688         if (IS_I965G(dev))
1689                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
1690         else
1691                 I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
1692
1693         dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
1694         obj_priv->fence_reg = I915_FENCE_REG_NONE;
1695 }
1696
1697 /**
1698  * Finds free space in the GTT aperture and binds the object there.
1699  */
1700 static int
1701 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1702 {
1703         struct drm_device *dev = obj->dev;
1704         drm_i915_private_t *dev_priv = dev->dev_private;
1705         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1706         struct drm_mm_node *free_space;
1707         int page_count, ret;
1708
1709         if (dev_priv->mm.suspended)
1710                 return -EBUSY;
1711         if (alignment == 0)
1712                 alignment = i915_gem_get_gtt_alignment(obj);
1713         if (alignment & (PAGE_SIZE - 1)) {
1714                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1715                 return -EINVAL;
1716         }
1717
1718  search_free:
1719         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1720                                         obj->size, alignment, 0);
1721         if (free_space != NULL) {
1722                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1723                                                        alignment);
1724                 if (obj_priv->gtt_space != NULL) {
1725                         obj_priv->gtt_space->private = obj;
1726                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
1727                 }
1728         }
1729         if (obj_priv->gtt_space == NULL) {
1730                 /* If the gtt is empty and we're still having trouble
1731                  * fitting our object in, we're out of memory.
1732                  */
1733 #if WATCH_LRU
1734                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1735 #endif
1736                 if (list_empty(&dev_priv->mm.inactive_list) &&
1737                     list_empty(&dev_priv->mm.flushing_list) &&
1738                     list_empty(&dev_priv->mm.active_list)) {
1739                         DRM_ERROR("GTT full, but LRU list empty\n");
1740                         return -ENOMEM;
1741                 }
1742
1743                 ret = i915_gem_evict_something(dev);
1744                 if (ret != 0) {
1745                         if (ret != -ERESTARTSYS)
1746                                 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1747                         return ret;
1748                 }
1749                 goto search_free;
1750         }
1751
1752 #if WATCH_BUF
1753         DRM_INFO("Binding object of size %d at 0x%08x\n",
1754                  obj->size, obj_priv->gtt_offset);
1755 #endif
1756         ret = i915_gem_object_get_page_list(obj);
1757         if (ret) {
1758                 drm_mm_put_block(obj_priv->gtt_space);
1759                 obj_priv->gtt_space = NULL;
1760                 return ret;
1761         }
1762
1763         page_count = obj->size / PAGE_SIZE;
1764         /* Create an AGP memory structure pointing at our pages, and bind it
1765          * into the GTT.
1766          */
1767         obj_priv->agp_mem = drm_agp_bind_pages(dev,
1768                                                obj_priv->page_list,
1769                                                page_count,
1770                                                obj_priv->gtt_offset,
1771                                                obj_priv->agp_type);
1772         if (obj_priv->agp_mem == NULL) {
1773                 i915_gem_object_free_page_list(obj);
1774                 drm_mm_put_block(obj_priv->gtt_space);
1775                 obj_priv->gtt_space = NULL;
1776                 return -ENOMEM;
1777         }
1778         atomic_inc(&dev->gtt_count);
1779         atomic_add(obj->size, &dev->gtt_memory);
1780
1781         /* Assert that the object is not currently in any GPU domain. As it
1782          * wasn't in the GTT, there shouldn't be any way it could have been in
1783          * a GPU cache
1784          */
1785         BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1786         BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1787
1788         return 0;
1789 }
1790
1791 void
1792 i915_gem_clflush_object(struct drm_gem_object *obj)
1793 {
1794         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1795
1796         /* If we don't have a page list set up, then we're not pinned
1797          * to GPU, and we can ignore the cache flush because it'll happen
1798          * again at bind time.
1799          */
1800         if (obj_priv->page_list == NULL)
1801                 return;
1802
1803         drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1804 }
1805
1806 /** Flushes any GPU write domain for the object if it's dirty. */
1807 static void
1808 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1809 {
1810         struct drm_device *dev = obj->dev;
1811         uint32_t seqno;
1812
1813         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1814                 return;
1815
1816         /* Queue the GPU write cache flushing we need. */
1817         i915_gem_flush(dev, 0, obj->write_domain);
1818         seqno = i915_add_request(dev, obj->write_domain);
1819         obj->write_domain = 0;
1820         i915_gem_object_move_to_active(obj, seqno);
1821 }
1822
1823 /** Flushes the GTT write domain for the object if it's dirty. */
1824 static void
1825 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1826 {
1827         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1828                 return;
1829
1830         /* No actual flushing is required for the GTT write domain.   Writes
1831          * to it immediately go to main memory as far as we know, so there's
1832          * no chipset flush.  It also doesn't land in render cache.
1833          */
1834         obj->write_domain = 0;
1835 }
1836
1837 /** Flushes the CPU write domain for the object if it's dirty. */
1838 static void
1839 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1840 {
1841         struct drm_device *dev = obj->dev;
1842
1843         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1844                 return;
1845
1846         i915_gem_clflush_object(obj);
1847         drm_agp_chipset_flush(dev);
1848         obj->write_domain = 0;
1849 }
1850
1851 /**
1852  * Moves a single object to the GTT read, and possibly write domain.
1853  *
1854  * This function returns when the move is complete, including waiting on
1855  * flushes to occur.
1856  */
1857 int
1858 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1859 {
1860         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1861         int ret;
1862
1863         /* Not valid to be called on unbound objects. */
1864         if (obj_priv->gtt_space == NULL)
1865                 return -EINVAL;
1866
1867         i915_gem_object_flush_gpu_write_domain(obj);
1868         /* Wait on any GPU rendering and flushing to occur. */
1869         ret = i915_gem_object_wait_rendering(obj);
1870         if (ret != 0)
1871                 return ret;
1872
1873         /* If we're writing through the GTT domain, then CPU and GPU caches
1874          * will need to be invalidated at next use.
1875          */
1876         if (write)
1877                 obj->read_domains &= I915_GEM_DOMAIN_GTT;
1878
1879         i915_gem_object_flush_cpu_write_domain(obj);
1880
1881         /* It should now be out of any other write domains, and we can update
1882          * the domain values for our changes.
1883          */
1884         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
1885         obj->read_domains |= I915_GEM_DOMAIN_GTT;
1886         if (write) {
1887                 obj->write_domain = I915_GEM_DOMAIN_GTT;
1888                 obj_priv->dirty = 1;
1889         }
1890
1891         return 0;
1892 }
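
/*
 * Illustrative caller sketch (an assumption about usage, not code from
 * this file): a path that wants to write an already-bound object through
 * the GTT aperture typically does so under struct_mutex and moves the
 * object into the GTT write domain first, so stale CPU and GPU cache
 * contents are flushed before the aperture write:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *	if (ret == 0)
 *		...write through the GTT mapping of the object...
 *	mutex_unlock(&dev->struct_mutex);
 */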
1893
1894 /**
1895  * Moves a single object to the CPU read, and possibly write domain.
1896  *
1897  * This function returns when the move is complete, including waiting on
1898  * flushes to occur.
1899  */
1900 static int
1901 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1902 {
1903         struct drm_device *dev = obj->dev;
1904         int ret;
1905
1906         i915_gem_object_flush_gpu_write_domain(obj);
1907         /* Wait on any GPU rendering and flushing to occur. */
1908         ret = i915_gem_object_wait_rendering(obj);
1909         if (ret != 0)
1910                 return ret;
1911
1912         i915_gem_object_flush_gtt_write_domain(obj);
1913
1914         /* If we have a partially-valid cache of the object in the CPU,
1915          * finish invalidating it and free the per-page flags.
1916          */
1917         i915_gem_object_set_to_full_cpu_read_domain(obj);
1918
1919         /* Flush the CPU cache if it's still invalid. */
1920         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1921                 i915_gem_clflush_object(obj);
1922                 drm_agp_chipset_flush(dev);
1923
1924                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1925         }
1926
1927         /* It should now be out of any other write domains, and we can update
1928          * the domain values for our changes.
1929          */
1930         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1931
1932         /* If we're writing through the CPU, then the GPU read domains will
1933          * need to be invalidated at next use.
1934          */
1935         if (write) {
1936                 obj->read_domains &= I915_GEM_DOMAIN_CPU;
1937                 obj->write_domain = I915_GEM_DOMAIN_CPU;
1938         }
1939
1940         return 0;
1941 }
1942
1943 /*
1944  * Set the next domain for the specified object. This
1945  * may not actually perform the necessary flushing/invalidating though,
1946  * as that may want to be batched with other set_domain operations
1947  *
1948  * This is (we hope) the only really tricky part of gem. The goal
1949  * is fairly simple -- track which caches hold bits of the object
1950  * and make sure they remain coherent. A few concrete examples may
1951  * help to explain how it works. For shorthand, we use the notation
1952  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1953  * a pair of read and write domain masks.
1954  *
1955  * Case 1: the batch buffer
1956  *
1957  *      1. Allocated
1958  *      2. Written by CPU
1959  *      3. Mapped to GTT
1960  *      4. Read by GPU
1961  *      5. Unmapped from GTT
1962  *      6. Freed
1963  *
1964  *      Let's take these a step at a time
1965  *
1966  *      1. Allocated
1967  *              Pages allocated from the kernel may still have
1968  *              cache contents, so we set them to (CPU, CPU) always.
1969  *      2. Written by CPU (using pwrite)
1970  *              The pwrite function calls set_domain (CPU, CPU) and
1971  *              this function does nothing (as nothing changes)
1972  *      3. Mapped by GTT
1973  *              This function asserts that the object is not
1974  *              currently in any GPU-based read or write domains
1975  *      4. Read by GPU
1976  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
1977  *              As write_domain is zero, this function adds in the
1978  *              current read domains (CPU+COMMAND, 0).
1979  *              flush_domains is set to CPU.
1980  *              invalidate_domains is set to COMMAND
1981  *              clflush is run to get data out of the CPU caches
1982  *              then i915_dev_set_domain calls i915_gem_flush to
1983  *              emit an MI_FLUSH and drm_agp_chipset_flush
1984  *      5. Unmapped from GTT
1985  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
1986  *              flush_domains and invalidate_domains end up both zero
1987  *              so no flushing/invalidating happens
1988  *      6. Freed
1989  *              yay, done
1990  *
1991  * Case 2: The shared render buffer
1992  *
1993  *      1. Allocated
1994  *      2. Mapped to GTT
1995  *      3. Read/written by GPU
1996  *      4. set_domain to (CPU,CPU)
1997  *      5. Read/written by CPU
1998  *      6. Read/written by GPU
1999  *
2000  *      1. Allocated
2001  *              Same as last example, (CPU, CPU)
2002  *      2. Mapped to GTT
2003  *              Nothing changes (assertions find that it is not in the GPU)
2004  *      3. Read/written by GPU
2005  *              execbuffer calls set_domain (RENDER, RENDER)
2006  *              flush_domains gets CPU
2007  *              invalidate_domains gets GPU
2008  *              clflush (obj)
2009  *              MI_FLUSH and drm_agp_chipset_flush
2010  *      4. set_domain (CPU, CPU)
2011  *              flush_domains gets GPU
2012  *              invalidate_domains gets CPU
2013  *              wait_rendering (obj) to make sure all drawing is complete.
2014  *              This will include an MI_FLUSH to get the data from GPU
2015  *              to memory
2016  *              clflush (obj) to invalidate the CPU cache
2017  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2018  *      5. Read/written by CPU
2019  *              cache lines are loaded and dirtied
2020  *      6. Read/written by GPU
2021  *              Same as last GPU access
2022  *
2023  * Case 3: The constant buffer
2024  *
2025  *      1. Allocated
2026  *      2. Written by CPU
2027  *      3. Read by GPU
2028  *      4. Updated (written) by CPU again
2029  *      5. Read by GPU
2030  *
2031  *      1. Allocated
2032  *              (CPU, CPU)
2033  *      2. Written by CPU
2034  *              (CPU, CPU)
2035  *      3. Read by GPU
2036  *              (CPU+RENDER, 0)
2037  *              flush_domains = CPU
2038  *              invalidate_domains = RENDER
2039  *              clflush (obj)
2040  *              MI_FLUSH
2041  *              drm_agp_chipset_flush
2042  *      4. Updated (written) by CPU again
2043  *              (CPU, CPU)
2044  *              flush_domains = 0 (no previous write domain)
2045  *              invalidate_domains = 0 (no new read domains)
2046  *      5. Read by GPU
2047  *              (CPU+RENDER, 0)
2048  *              flush_domains = CPU
2049  *              invalidate_domains = RENDER
2050  *              clflush (obj)
2051  *              MI_FLUSH
2052  *              drm_agp_chipset_flush
2053  */
2054 static void
2055 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2056 {
2057         struct drm_device               *dev = obj->dev;
2058         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
2059         uint32_t                        invalidate_domains = 0;
2060         uint32_t                        flush_domains = 0;
2061
2062         BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2063         BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2064
2065 #if WATCH_BUF
2066         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2067                  __func__, obj,
2068                  obj->read_domains, obj->pending_read_domains,
2069                  obj->write_domain, obj->pending_write_domain);
2070 #endif
2071         /*
2072          * If the object isn't moving to a new write domain,
2073          * let the object stay in multiple read domains
2074          */
2075         if (obj->pending_write_domain == 0)
2076                 obj->pending_read_domains |= obj->read_domains;
2077         else
2078                 obj_priv->dirty = 1;
2079
2080         /*
2081          * Flush the current write domain if
2082          * the new read domains don't match. Invalidate
2083          * any read domains which differ from the old
2084          * write domain
2085          */
2086         if (obj->write_domain &&
2087             obj->write_domain != obj->pending_read_domains) {
2088                 flush_domains |= obj->write_domain;
2089                 invalidate_domains |=
2090                         obj->pending_read_domains & ~obj->write_domain;
2091         }
2092         /*
2093          * Invalidate any read caches which may have
2094          * stale data. That is, any new read domains.
2095          */
2096         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2097         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2098 #if WATCH_BUF
2099                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2100                          __func__, flush_domains, invalidate_domains);
2101 #endif
2102                 i915_gem_clflush_object(obj);
2103         }
2104
2105         /* The actual obj->write_domain will be updated with
2106          * pending_write_domain after we emit the accumulated flush for all
2107          * of our domain changes in execbuffers (which clears objects'
2108          * write_domains).  So if we have a current write domain that we
2109          * aren't changing, set pending_write_domain to that.
2110          */
2111         if (flush_domains == 0 && obj->pending_write_domain == 0)
2112                 obj->pending_write_domain = obj->write_domain;
2113         obj->read_domains = obj->pending_read_domains;
2114
2115         dev->invalidate_domains |= invalidate_domains;
2116         dev->flush_domains |= flush_domains;
2117 #if WATCH_BUF
2118         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2119                  __func__,
2120                  obj->read_domains, obj->write_domain,
2121                  dev->invalidate_domains, dev->flush_domains);
2122 #endif
2123 }
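
/*
 * Worked trace (illustrative): take step 3 of the constant-buffer case
 * above, an object sitting at (CPU, CPU) that execbuffer wants to read
 * as (RENDER, 0).  Running it through this function:
 *
 *	pending_write_domain == 0  ->  pending_read_domains |= CPU
 *	write_domain (CPU) != pending_read_domains (CPU+RENDER)
 *		->  flush_domains |= CPU, invalidate_domains |= RENDER
 *	CPU is in the flush set  ->  i915_gem_clflush_object(obj)
 *	obj->read_domains becomes CPU+RENDER; pending_write_domain stays 0,
 *	so execbuffer later clears obj->write_domain
 *	dev->flush_domains |= CPU, dev->invalidate_domains |= RENDER,
 *	which execbuffer turns into an MI_FLUSH plus chipset flush.
 */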
2124
2125 /**
2126  * Moves the object from a partially CPU read to a full one.
2127  *
2128  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2129  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2130  */
2131 static void
2132 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2133 {
2134         struct drm_device *dev = obj->dev;
2135         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2136
2137         if (!obj_priv->page_cpu_valid)
2138                 return;
2139
2140         /* If we're partially in the CPU read domain, finish moving it in.
2141          */
2142         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2143                 int i;
2144
2145                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2146                         if (obj_priv->page_cpu_valid[i])
2147                                 continue;
2148                         drm_clflush_pages(obj_priv->page_list + i, 1);
2149                 }
2150                 drm_agp_chipset_flush(dev);
2151         }
2152
2153         /* Free the page_cpu_valid mappings which are now stale, whether
2154          * or not we've got I915_GEM_DOMAIN_CPU.
2155          */
2156         drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2157                  DRM_MEM_DRIVER);
2158         obj_priv->page_cpu_valid = NULL;
2159 }
2160
2161 /**
2162  * Set the CPU read domain on a range of the object.
2163  *
2164  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2165  * not entirely valid.  The page_cpu_valid member of the object flags which
2166  * pages have been flushed, and will be respected by
2167  * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2168  * of the whole object.
2169  *
2170  * This function returns when the move is complete, including waiting on
2171  * flushes to occur.
2172  */
2173 static int
2174 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2175                                           uint64_t offset, uint64_t size)
2176 {
2177         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2178         int i, ret;
2179
2180         if (offset == 0 && size == obj->size)
2181                 return i915_gem_object_set_to_cpu_domain(obj, 0);
2182
2183         i915_gem_object_flush_gpu_write_domain(obj);
2184         /* Wait on any GPU rendering and flushing to occur. */
2185         ret = i915_gem_object_wait_rendering(obj);
2186         if (ret != 0)
2187                 return ret;
2188         i915_gem_object_flush_gtt_write_domain(obj);
2189
2190         /* If we're already fully in the CPU read domain, we're done. */
2191         if (obj_priv->page_cpu_valid == NULL &&
2192             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2193                 return 0;
2194
2195         /* Otherwise, create/clear the per-page CPU read domain flag if we're
2196          * newly adding I915_GEM_DOMAIN_CPU
2197          */
2198         if (obj_priv->page_cpu_valid == NULL) {
2199                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2200                                                       DRM_MEM_DRIVER);
2201                 if (obj_priv->page_cpu_valid == NULL)
2202                         return -ENOMEM;
2203         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2204                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
2205
2206         /* Flush the cache on any pages that are still invalid from the CPU's
2207          * perspective.
2208          */
2209         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2210              i++) {
2211                 if (obj_priv->page_cpu_valid[i])
2212                         continue;
2213
2214                 drm_clflush_pages(obj_priv->page_list + i, 1);
2215
2216                 obj_priv->page_cpu_valid[i] = 1;
2217         }
2218
2219         /* It should now be out of any other write domains, and we can update
2220          * the domain values for our changes.
2221          */
2222         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2223
2224         obj->read_domains |= I915_GEM_DOMAIN_CPU;
2225
2226         return 0;
2227 }
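
/*
 * Worked example (illustrative, assuming 4 KiB pages): for a hypothetical
 * access of 0x1000 bytes at offset 0x1800 within a 16 KiB object, the
 * loop above runs from i = 0x1800 / PAGE_SIZE = 1 to
 * (0x1800 + 0x1000 - 1) / PAGE_SIZE = 2, so only pages 1 and 2 are
 * clflushed and marked in page_cpu_valid.  The object then carries
 * I915_GEM_DOMAIN_CPU in its read domains even though pages 0 and 3 were
 * never flushed; the page_cpu_valid map is what keeps that honest when
 * i915_gem_object_set_to_full_cpu_read_domain() later finishes the move.
 */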
2228
2229 /**
2230  * Pin an object to the GTT and evaluate the relocations landing in it.
2231  */
2232 static int
2233 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2234                                  struct drm_file *file_priv,
2235                                  struct drm_i915_gem_exec_object *entry)
2236 {
2237         struct drm_device *dev = obj->dev;
2238         drm_i915_private_t *dev_priv = dev->dev_private;
2239         struct drm_i915_gem_relocation_entry reloc;
2240         struct drm_i915_gem_relocation_entry __user *relocs;
2241         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2242         int i, ret;
2243         void __iomem *reloc_page;
2244
2245         /* Choose the GTT offset for our buffer and put it there. */
2246         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2247         if (ret)
2248                 return ret;
2249
2250         entry->offset = obj_priv->gtt_offset;
2251
2252         relocs = (struct drm_i915_gem_relocation_entry __user *)
2253                  (uintptr_t) entry->relocs_ptr;
2254         /* Apply the relocations, using the GTT aperture to avoid cache
2255          * flushing requirements.
2256          */
2257         for (i = 0; i < entry->relocation_count; i++) {
2258                 struct drm_gem_object *target_obj;
2259                 struct drm_i915_gem_object *target_obj_priv;
2260                 uint32_t reloc_val, reloc_offset;
2261                 uint32_t __iomem *reloc_entry;
2262
2263                 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2264                 if (ret != 0) {
2265                         i915_gem_object_unpin(obj);
2266                         return ret;
2267                 }
2268
2269                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2270                                                    reloc.target_handle);
2271                 if (target_obj == NULL) {
2272                         i915_gem_object_unpin(obj);
2273                         return -EBADF;
2274                 }
2275                 target_obj_priv = target_obj->driver_private;
2276
2277                 /* The target buffer should have appeared before us in the
2278                  * exec_object list, so it should have a GTT space bound by now.
2279                  */
2280                 if (target_obj_priv->gtt_space == NULL) {
2281                         DRM_ERROR("No GTT space found for object %d\n",
2282                                   reloc.target_handle);
2283                         drm_gem_object_unreference(target_obj);
2284                         i915_gem_object_unpin(obj);
2285                         return -EINVAL;
2286                 }
2287
2288                 if (reloc.offset > obj->size - 4) {
2289                         DRM_ERROR("Relocation beyond object bounds: "
2290                                   "obj %p target %d offset %d size %d.\n",
2291                                   obj, reloc.target_handle,
2292                                   (int) reloc.offset, (int) obj->size);
2293                         drm_gem_object_unreference(target_obj);
2294                         i915_gem_object_unpin(obj);
2295                         return -EINVAL;
2296                 }
2297                 if (reloc.offset & 3) {
2298                         DRM_ERROR("Relocation not 4-byte aligned: "
2299                                   "obj %p target %d offset %d.\n",
2300                                   obj, reloc.target_handle,
2301                                   (int) reloc.offset);
2302                         drm_gem_object_unreference(target_obj);
2303                         i915_gem_object_unpin(obj);
2304                         return -EINVAL;
2305                 }
2306
2307                 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
2308                     reloc.read_domains & I915_GEM_DOMAIN_CPU) {
2309                         DRM_ERROR("reloc with read/write CPU domains: "
2310                                   "obj %p target %d offset %d "
2311                                   "read %08x write %08x",
2312                                   obj, reloc.target_handle,
2313                                   (int) reloc.offset,
2314                                   reloc.read_domains,
2315                                   reloc.write_domain);
2316                         drm_gem_object_unreference(target_obj);
2317                         i915_gem_object_unpin(obj);
2318                         return -EINVAL;
2319                 }
2320
2321                 if (reloc.write_domain && target_obj->pending_write_domain &&
2322                     reloc.write_domain != target_obj->pending_write_domain) {
2323                         DRM_ERROR("Write domain conflict: "
2324                                   "obj %p target %d offset %d "
2325                                   "new %08x old %08x\n",
2326                                   obj, reloc.target_handle,
2327                                   (int) reloc.offset,
2328                                   reloc.write_domain,
2329                                   target_obj->pending_write_domain);
2330                         drm_gem_object_unreference(target_obj);
2331                         i915_gem_object_unpin(obj);
2332                         return -EINVAL;
2333                 }
2334
2335 #if WATCH_RELOC
2336                 DRM_INFO("%s: obj %p offset %08x target %d "
2337                          "read %08x write %08x gtt %08x "
2338                          "presumed %08x delta %08x\n",
2339                          __func__,
2340                          obj,
2341                          (int) reloc.offset,
2342                          (int) reloc.target_handle,
2343                          (int) reloc.read_domains,
2344                          (int) reloc.write_domain,
2345                          (int) target_obj_priv->gtt_offset,
2346                          (int) reloc.presumed_offset,
2347                          reloc.delta);
2348 #endif
2349
2350                 target_obj->pending_read_domains |= reloc.read_domains;
2351                 target_obj->pending_write_domain |= reloc.write_domain;
2352
2353                 /* If the relocation already has the right value in it, no
2354                  * more work needs to be done.
2355                  */
2356                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
2357                         drm_gem_object_unreference(target_obj);
2358                         continue;
2359                 }
2360
2361                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2362                 if (ret != 0) {
2363                         drm_gem_object_unreference(target_obj);
2364                         i915_gem_object_unpin(obj);
2365                         return -EINVAL;
2366                 }
2367
2368                 /* Map the page containing the relocation we're going to
2369                  * perform.
2370                  */
2371                 reloc_offset = obj_priv->gtt_offset + reloc.offset;
2372                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2373                                                       (reloc_offset &
2374                                                        ~(PAGE_SIZE - 1)));
2375                 reloc_entry = (uint32_t __iomem *)(reloc_page +
2376                                                    (reloc_offset & (PAGE_SIZE - 1)));
2377                 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
2378
2379 #if WATCH_BUF
2380                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2381                           obj, (unsigned int) reloc.offset,
2382                           readl(reloc_entry), reloc_val);
2383 #endif
2384                 writel(reloc_val, reloc_entry);
2385                 io_mapping_unmap_atomic(reloc_page);
2386
2387                 /* Write the updated presumed offset for this entry back out
2388                  * to the user.
2389                  */
2390                 reloc.presumed_offset = target_obj_priv->gtt_offset;
2391                 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2392                 if (ret != 0) {
2393                         drm_gem_object_unreference(target_obj);
2394                         i915_gem_object_unpin(obj);
2395                         return ret;
2396                 }
2397
2398                 drm_gem_object_unreference(target_obj);
2399         }
2400
2401 #if WATCH_BUF
2402         if (0)
2403                 i915_gem_dump_object(obj, 128, __func__, ~0);
2404 #endif
2405         return 0;
2406 }
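
/*
 * Worked example (illustrative, hypothetical numbers): consider a
 * relocation at offset 0x40 into this buffer whose target object is now
 * bound at gtt_offset 0x00200000 with delta 0x10, but whose
 * presumed_offset is a stale 0x00180000.  Since presumed != actual, the
 * loop above maps the aperture page containing
 * (obj_priv->gtt_offset + 0x40), writel()s 0x00200000 + 0x10 = 0x00200010
 * into the buffer, and copies presumed_offset = 0x00200000 back to
 * userspace so an unchanged buffer can skip the rewrite on the next
 * execbuffer.
 */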
2407
2408 /** Dispatch a batchbuffer to the ring
2409  */
2410 static int
2411 i915_dispatch_gem_execbuffer(struct drm_device *dev,
2412                               struct drm_i915_gem_execbuffer *exec,
2413                               uint64_t exec_offset)
2414 {
2415         drm_i915_private_t *dev_priv = dev->dev_private;
2416         struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2417                                              (uintptr_t) exec->cliprects_ptr;
2418         int nbox = exec->num_cliprects;
2419         int i = 0, count;
2420         uint32_t        exec_start, exec_len;
2421         RING_LOCALS;
2422
2423         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
2424         exec_len = (uint32_t) exec->batch_len;
2425
2426         if ((exec_start | exec_len) & 0x7) {
2427                 DRM_ERROR("alignment\n");
2428                 return -EINVAL;
2429         }
2430
2431         if (!exec_start)
2432                 return -EINVAL;
2433
2434         count = nbox ? nbox : 1;
2435
2436         for (i = 0; i < count; i++) {
2437                 if (i < nbox) {
2438                         int ret = i915_emit_box(dev, boxes, i,
2439                                                 exec->DR1, exec->DR4);
2440                         if (ret)
2441                                 return ret;
2442                 }
2443
2444                 if (IS_I830(dev) || IS_845G(dev)) {
2445                         BEGIN_LP_RING(4);
2446                         OUT_RING(MI_BATCH_BUFFER);
2447                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
2448                         OUT_RING(exec_start + exec_len - 4);
2449                         OUT_RING(0);
2450                         ADVANCE_LP_RING();
2451                 } else {
2452                         BEGIN_LP_RING(2);
2453                         if (IS_I965G(dev)) {
2454                                 OUT_RING(MI_BATCH_BUFFER_START |
2455                                          (2 << 6) |
2456                                          MI_BATCH_NON_SECURE_I965);
2457                                 OUT_RING(exec_start);
2458                         } else {
2459                                 OUT_RING(MI_BATCH_BUFFER_START |
2460                                          (2 << 6));
2461                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
2462                         }
2463                         ADVANCE_LP_RING();
2464                 }
2465         }
2466
2467         /* XXX breadcrumb */
2468         return 0;
2469 }
2470
2471 /* Throttle our rendering by waiting until the ring has completed our requests
2472  * emitted over 20 msec ago.
2473  *
2474  * This should get us reasonable parallelism between CPU and GPU but also
2475  * relatively low latency when blocking on a particular request to finish.
2476  */
2477 static int
2478 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2479 {
2480         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2481         int ret = 0;
2482         uint32_t seqno;
2483
2484         mutex_lock(&dev->struct_mutex);
2485         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
2486         i915_file_priv->mm.last_gem_throttle_seqno =
2487                 i915_file_priv->mm.last_gem_seqno;
2488         if (seqno)
2489                 ret = i915_wait_request(dev, seqno);
2490         mutex_unlock(&dev->struct_mutex);
2491         return ret;
2492 }
2493
2494 int
2495 i915_gem_execbuffer(struct drm_device *dev, void *data,
2496                     struct drm_file *file_priv)
2497 {
2498         drm_i915_private_t *dev_priv = dev->dev_private;
2499         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2500         struct drm_i915_gem_execbuffer *args = data;
2501         struct drm_i915_gem_exec_object *exec_list = NULL;
2502         struct drm_gem_object **object_list = NULL;
2503         struct drm_gem_object *batch_obj;
2504         struct drm_i915_gem_object *obj_priv;
2505         int ret, i, pinned = 0;
2506         uint64_t exec_offset;
2507         uint32_t seqno, flush_domains;
2508         int pin_tries;
2509
2510 #if WATCH_EXEC
2511         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
2512                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
2513 #endif
2514
2515         if (args->buffer_count < 1) {
2516                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
2517                 return -EINVAL;
2518         }
2519         /* Copy in the exec list from userland */
2520         exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
2521                                DRM_MEM_DRIVER);
2522         object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
2523                                  DRM_MEM_DRIVER);
2524         if (exec_list == NULL || object_list == NULL) {
2525                 DRM_ERROR("Failed to allocate exec or object list "
2526                           "for %d buffers\n",
2527                           args->buffer_count);
2528                 ret = -ENOMEM;
2529                 goto pre_mutex_err;
2530         }
2531         ret = copy_from_user(exec_list,
2532                              (struct drm_i915_relocation_entry __user *)
2533                              (uintptr_t) args->buffers_ptr,
2534                              sizeof(*exec_list) * args->buffer_count);
2535         if (ret != 0) {
2536                 DRM_ERROR("copy %d exec entries failed %d\n",
2537                           args->buffer_count, ret);
2538                 goto pre_mutex_err;
2539         }
2540
2541         mutex_lock(&dev->struct_mutex);
2542
2543         i915_verify_inactive(dev, __FILE__, __LINE__);
2544
2545         if (dev_priv->mm.wedged) {
2546                 DRM_ERROR("Execbuf while wedged\n");
2547                 mutex_unlock(&dev->struct_mutex);
2548                 ret = -EIO;
2549                 goto pre_mutex_err;
2550         }
2551
2552         if (dev_priv->mm.suspended) {
2553                 DRM_ERROR("Execbuf while VT-switched.\n");
2554                 mutex_unlock(&dev->struct_mutex);
2555                 ret = -EBUSY;
2556                 goto pre_mutex_err;
2557         }
2558
2559         /* Look up object handles */
2560         for (i = 0; i < args->buffer_count; i++) {
2561                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
2562                                                        exec_list[i].handle);
2563                 if (object_list[i] == NULL) {
2564                         DRM_ERROR("Invalid object handle %d at index %d\n",
2565                                    exec_list[i].handle, i);
2566                         ret = -EBADF;
2567                         goto err;
2568                 }
2569
2570                 obj_priv = object_list[i]->driver_private;
2571                 if (obj_priv->in_execbuffer) {
2572                         DRM_ERROR("Object %p appears more than once in object list\n",
2573                                    object_list[i]);
2574                         ret = -EBADF;
2575                         goto err;
2576                 }
2577                 obj_priv->in_execbuffer = true;
2578         }
2579
2580         /* Pin and relocate */
2581         for (pin_tries = 0; ; pin_tries++) {
2582                 ret = 0;
2583                 for (i = 0; i < args->buffer_count; i++) {
2584                         object_list[i]->pending_read_domains = 0;
2585                         object_list[i]->pending_write_domain = 0;
2586                         ret = i915_gem_object_pin_and_relocate(object_list[i],
2587                                                                file_priv,
2588                                                                &exec_list[i]);
2589                         if (ret)
2590                                 break;
2591                         pinned = i + 1;
2592                 }
2593                 /* success */
2594                 if (ret == 0)
2595                         break;
2596
2597                 /* error other than GTT full, or we've already tried again */
2598                 if (ret != -ENOMEM || pin_tries >= 1) {
2599                         if (ret != -ERESTARTSYS)
2600                                 DRM_ERROR("Failed to pin buffers %d\n", ret);
2601                         goto err;
2602                 }
2603
2604                 /* unpin all of our buffers */
2605                 for (i = 0; i < pinned; i++)
2606                         i915_gem_object_unpin(object_list[i]);
2607                 pinned = 0;
2608
2609                 /* evict everyone we can from the aperture */
2610                 ret = i915_gem_evict_everything(dev);
2611                 if (ret)
2612                         goto err;
2613         }
2614
2615         /* Set the pending read domains for the batch buffer to COMMAND */
2616         batch_obj = object_list[args->buffer_count-1];
2617         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
2618         batch_obj->pending_write_domain = 0;
2619
2620         i915_verify_inactive(dev, __FILE__, __LINE__);
2621
2622         /* Zero the global flush/invalidate flags. These
2623          * will be modified as new domains are computed
2624          * for each object
2625          */
2626         dev->invalidate_domains = 0;
2627         dev->flush_domains = 0;
2628
2629         for (i = 0; i < args->buffer_count; i++) {
2630                 struct drm_gem_object *obj = object_list[i];
2631
2632                 /* Compute new gpu domains and update invalidate/flush */
2633                 i915_gem_object_set_to_gpu_domain(obj);
2634         }
2635
2636         i915_verify_inactive(dev, __FILE__, __LINE__);
2637
2638         if (dev->invalidate_domains | dev->flush_domains) {
2639 #if WATCH_EXEC
2640                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2641                           __func__,
2642                          dev->invalidate_domains,
2643                          dev->flush_domains);
2644 #endif
2645                 i915_gem_flush(dev,
2646                                dev->invalidate_domains,
2647                                dev->flush_domains);
2648                 if (dev->flush_domains)
2649                         (void)i915_add_request(dev, dev->flush_domains);
2650         }
2651
2652         for (i = 0; i < args->buffer_count; i++) {
2653                 struct drm_gem_object *obj = object_list[i];
2654
2655                 obj->write_domain = obj->pending_write_domain;
2656         }
2657
2658         i915_verify_inactive(dev, __FILE__, __LINE__);
2659
2660 #if WATCH_COHERENCY
2661         for (i = 0; i < args->buffer_count; i++) {
2662                 i915_gem_object_check_coherency(object_list[i],
2663                                                 exec_list[i].handle);
2664         }
2665 #endif
2666
2667         exec_offset = exec_list[args->buffer_count - 1].offset;
2668
2669 #if WATCH_EXEC
2670         i915_gem_dump_object(object_list[args->buffer_count - 1],
2671                               args->batch_len,
2672                               __func__,
2673                               ~0);
2674 #endif
2675
2676         /* Exec the batchbuffer */
2677         ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
2678         if (ret) {
2679                 DRM_ERROR("dispatch failed %d\n", ret);
2680                 goto err;
2681         }
2682
2683         /*
2684          * Ensure that the commands in the batch buffer are
2685          * finished before the interrupt fires
2686          */
2687         flush_domains = i915_retire_commands(dev);
2688
2689         i915_verify_inactive(dev, __FILE__, __LINE__);
2690
2691         /*
2692          * Get a seqno representing the execution of the current buffer,
2693          * which we can wait on.  We would like to mitigate these interrupts,
2694          * likely by only creating seqnos occasionally (so that we have
2695          * *some* interrupts representing completion of buffers that we can
2696          * wait on when trying to clear up gtt space).
2697          */
2698         seqno = i915_add_request(dev, flush_domains);
2699         BUG_ON(seqno == 0);
2700         i915_file_priv->mm.last_gem_seqno = seqno;
2701         for (i = 0; i < args->buffer_count; i++) {
2702                 struct drm_gem_object *obj = object_list[i];
2703
2704                 i915_gem_object_move_to_active(obj, seqno);
2705 #if WATCH_LRU
2706                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
2707 #endif
2708         }
2709 #if WATCH_LRU
2710         i915_dump_lru(dev, __func__);
2711 #endif
2712
2713         i915_verify_inactive(dev, __FILE__, __LINE__);
2714
2715 err:
2716         for (i = 0; i < pinned; i++)
2717                 i915_gem_object_unpin(object_list[i]);
2718
2719         for (i = 0; i < args->buffer_count; i++) {
2720                 if (object_list[i]) {
2721                         obj_priv = object_list[i]->driver_private;
2722                         obj_priv->in_execbuffer = false;
2723                 }
2724                 drm_gem_object_unreference(object_list[i]);
2725         }
2726
2727         mutex_unlock(&dev->struct_mutex);
2728
2729         if (!ret) {
2730                 /* Copy the new buffer offsets back to the user's exec list. */
2731                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2732                                    (uintptr_t) args->buffers_ptr,
2733                                    exec_list,
2734                                    sizeof(*exec_list) * args->buffer_count);
2735                 if (ret)
2736                         DRM_ERROR("failed to copy %d exec entries "
2737                                   "back to user (%d)\n",
2738                                   args->buffer_count, ret);
2739         }
2740
2741 pre_mutex_err:
2742         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2743                  DRM_MEM_DRIVER);
2744         drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2745                  DRM_MEM_DRIVER);
2746
2747         return ret;
2748 }
2749
2750 int
2751 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2752 {
2753         struct drm_device *dev = obj->dev;
2754         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2755         int ret;
2756
2757         i915_verify_inactive(dev, __FILE__, __LINE__);
2758         if (obj_priv->gtt_space == NULL) {
2759                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2760                 if (ret != 0) {
2761                         if (ret != -EBUSY && ret != -ERESTARTSYS)
2762                                 DRM_ERROR("Failure to bind: %d\n", ret);
2763                         return ret;
2764                 }
2765         }
2766         /*
2767          * Pre-965 chips need a fence register set up in order to
2768          * properly handle tiled surfaces.
2769          */
2770         if (!IS_I965G(dev) &&
2771             obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2772             obj_priv->tiling_mode != I915_TILING_NONE) {
2773                 ret = i915_gem_object_get_fence_reg(obj, true);
2774                 if (ret != 0) {
2775                         if (ret != -EBUSY && ret != -ERESTARTSYS)
2776                                 DRM_ERROR("Failure to install fence: %d\n",
2777                                           ret);
2778                         return ret;
2779                 }
2780         }
2781         obj_priv->pin_count++;
2782
2783         /* If the object is not active and not pending a flush,
2784          * remove it from the inactive list
2785          */
2786         if (obj_priv->pin_count == 1) {
2787                 atomic_inc(&dev->pin_count);
2788                 atomic_add(obj->size, &dev->pin_memory);
2789                 if (!obj_priv->active &&
2790                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2791                                            I915_GEM_DOMAIN_GTT)) == 0 &&
2792                     !list_empty(&obj_priv->list))
2793                         list_del_init(&obj_priv->list);
2794         }
2795         i915_verify_inactive(dev, __FILE__, __LINE__);
2796
2797         return 0;
2798 }
2799
2800 void
2801 i915_gem_object_unpin(struct drm_gem_object *obj)
2802 {
2803         struct drm_device *dev = obj->dev;
2804         drm_i915_private_t *dev_priv = dev->dev_private;
2805         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2806
2807         i915_verify_inactive(dev, __FILE__, __LINE__);
2808         obj_priv->pin_count--;
2809         BUG_ON(obj_priv->pin_count < 0);
2810         BUG_ON(obj_priv->gtt_space == NULL);
2811
2812         /* If the object is no longer pinned, and is
2813          * neither active nor being flushed, then stick it on
2814          * the inactive list
2815          */
2816         if (obj_priv->pin_count == 0) {
2817                 if (!obj_priv->active &&
2818                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2819                                            I915_GEM_DOMAIN_GTT)) == 0)
2820                         list_move_tail(&obj_priv->list,
2821                                        &dev_priv->mm.inactive_list);
2822                 atomic_dec(&dev->pin_count);
2823                 atomic_sub(obj->size, &dev->pin_memory);
2824         }
2825         i915_verify_inactive(dev, __FILE__, __LINE__);
2826 }
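
/*
 * A minimal sketch of the usual in-kernel pattern (see i915_gem_init_hws()
 * and i915_gem_init_ringbuffer() below for real instances); it assumes the
 * caller already holds dev->struct_mutex and owns a reference on the object:
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret)
 *		return ret;
 *	... program the hardware with obj_priv->gtt_offset ...
 *	i915_gem_object_unpin(obj);
 */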
2827
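/*
 * The pin/unpin ioctls below expose pinning to a privileged client
 * (historically the X server, for scanout and cursor buffers).  Per-file
 * accounting via pin_filp and user_pin_count ensures that only the file
 * which pinned a buffer may unpin it, and that nested pins from that file
 * are balanced before the underlying pin is released.
 */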
2828 int
2829 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2830                    struct drm_file *file_priv)
2831 {
2832         struct drm_i915_gem_pin *args = data;
2833         struct drm_gem_object *obj;
2834         struct drm_i915_gem_object *obj_priv;
2835         int ret;
2836
2837         mutex_lock(&dev->struct_mutex);
2838
2839         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2840         if (obj == NULL) {
2841                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2842                           args->handle);
2843                 mutex_unlock(&dev->struct_mutex);
2844                 return -EBADF;
2845         }
2846         obj_priv = obj->driver_private;
2847
2848         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
2849                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2850                           args->handle);
2851                 drm_gem_object_unreference(obj);
2852                 mutex_unlock(&dev->struct_mutex);
2853                 return -EINVAL;
2854         }
2855
2856         if (obj_priv->user_pin_count == 0) {
2857                 ret = i915_gem_object_pin(obj, args->alignment);
2858                 if (ret != 0) {
2859                         drm_gem_object_unreference(obj);
2860                         mutex_unlock(&dev->struct_mutex);
2861                         return ret;
2862                 }
2863         }
2864         obj_priv->user_pin_count++;
2865         obj_priv->pin_filp = file_priv;
2866
2867         /* XXX - flush the CPU caches for pinned objects
2868          * as the X server doesn't manage domains yet
2869          */
2870         i915_gem_object_flush_cpu_write_domain(obj);
2871         args->offset = obj_priv->gtt_offset;
2872         drm_gem_object_unreference(obj);
2873         mutex_unlock(&dev->struct_mutex);
2874
2875         return 0;
2876 }
2877
2878 int
2879 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2880                      struct drm_file *file_priv)
2881 {
2882         struct drm_i915_gem_pin *args = data;
2883         struct drm_gem_object *obj;
2884         struct drm_i915_gem_object *obj_priv;
2885
2886         mutex_lock(&dev->struct_mutex);
2887
2888         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2889         if (obj == NULL) {
2890                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2891                           args->handle);
2892                 mutex_unlock(&dev->struct_mutex);
2893                 return -EBADF;
2894         }
2895
2896         obj_priv = obj->driver_private;
2897         if (obj_priv->pin_filp != file_priv) {
2898                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
2899                           args->handle);
2900                 drm_gem_object_unreference(obj);
2901                 mutex_unlock(&dev->struct_mutex);
2902                 return -EINVAL;
2903         }
2904         obj_priv->user_pin_count--;
2905         if (obj_priv->user_pin_count == 0) {
2906                 obj_priv->pin_filp = NULL;
2907                 i915_gem_object_unpin(obj);
2908         }
2909
2910         drm_gem_object_unreference(obj);
2911         mutex_unlock(&dev->struct_mutex);
2912         return 0;
2913 }
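
/*
 * Illustrative userspace sketch (not part of the driver): how a DRM master
 * such as the X server might drive the pin/unpin ioctls above.  The file
 * descriptor and buffer handle are assumed to come from earlier GEM calls.
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0)
 *		scanout_base = pin.offset;	(GTT offset of the buffer)
 *	...
 *	struct drm_i915_gem_pin unpin = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
 */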
2914
2915 int
2916 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2917                     struct drm_file *file_priv)
2918 {
2919         struct drm_i915_gem_busy *args = data;
2920         struct drm_gem_object *obj;
2921         struct drm_i915_gem_object *obj_priv;
2922
2923         mutex_lock(&dev->struct_mutex);
2924         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2925         if (obj == NULL) {
2926                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2927                           args->handle);
2928                 mutex_unlock(&dev->struct_mutex);
2929                 return -EBADF;
2930         }
2931
2932         /* Update the active list for the hardware's current position.
2933          * Otherwise this only updates on a delayed timer or when irqs are
2934          * actually unmasked, and our working set ends up being larger than
2935          * required.
2936          */
2937         i915_gem_retire_requests(dev);
2938
2939         obj_priv = obj->driver_private;
2940         /* Don't count being on the flushing list against the object being
2941          * done.  Otherwise, a buffer left on the flushing list but not getting
2942          * flushed (because nobody's flushing that domain) won't ever return
2943          * unbusy and get reused by libdrm's bo cache.  The other expected
2944          * consumer of this interface, OpenGL's occlusion queries, also specs
2945          * that the objects get unbusy "eventually" without any interference.
2946          */
2947         args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
2948
2949         drm_gem_object_unreference(obj);
2950         mutex_unlock(&dev->struct_mutex);
2951         return 0;
2952 }
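
/*
 * Illustrative userspace sketch (not part of the driver): the expected
 * consumers of the busy ioctl above are libdrm's buffer-reuse cache and
 * GL occlusion queries, polling whether the GPU is done with a buffer;
 * reuse_buffer() stands in for the caller's own bookkeeping.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy)
 *		reuse_buffer(handle);
 */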
2953
2954 int
2955 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2956                         struct drm_file *file_priv)
2957 {
2958         return i915_gem_ring_throttle(dev, file_priv);
2959 }
2960
2961 int i915_gem_init_object(struct drm_gem_object *obj)
2962 {
2963         struct drm_i915_gem_object *obj_priv;
2964
2965         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2966         if (obj_priv == NULL)
2967                 return -ENOMEM;
2968
2969         /*
2970          * We've just allocated pages from the kernel,
2971          * so they've just been written by the CPU with
2972          * zeros. They'll need to be clflushed before we
2973          * use them with the GPU.
2974          */
2975         obj->write_domain = I915_GEM_DOMAIN_CPU;
2976         obj->read_domains = I915_GEM_DOMAIN_CPU;
2977
2978         obj_priv->agp_type = AGP_USER_MEMORY;
2979
2980         obj->driver_private = obj_priv;
2981         obj_priv->obj = obj;
2982         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2983         INIT_LIST_HEAD(&obj_priv->list);
2984
2985         return 0;
2986 }
2987
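/*
 * Final teardown of a GEM object: drop any remaining pins, detach a
 * backing phys object, unbind from the GTT, release the mmap offset and
 * free the driver-private state.
 */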
2988 void i915_gem_free_object(struct drm_gem_object *obj)
2989 {
2990         struct drm_device *dev = obj->dev;
2991         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2992
2993         while (obj_priv->pin_count > 0)
2994                 i915_gem_object_unpin(obj);
2995
2996         if (obj_priv->phys_obj)
2997                 i915_gem_detach_phys_object(dev, obj);
2998
2999         i915_gem_object_unbind(obj);
3000
3001         i915_gem_free_mmap_offset(obj);
3002
3003         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3004         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3005 }
3006
3007 /** Unbinds all objects that are on the given buffer list. */
3008 static int
3009 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3010 {
3011         struct drm_gem_object *obj;
3012         struct drm_i915_gem_object *obj_priv;
3013         int ret;
3014
3015         while (!list_empty(head)) {
3016                 obj_priv = list_first_entry(head,
3017                                             struct drm_i915_gem_object,
3018                                             list);
3019                 obj = obj_priv->obj;
3020
3021                 if (obj_priv->pin_count != 0) {
3022                         DRM_ERROR("Pinned object in unbind list\n");
3024                         return -EINVAL;
3025                 }
3026
3027                 ret = i915_gem_object_unbind(obj);
3028                 if (ret != 0) {
3029                         DRM_ERROR("Error unbinding object: %d\n", ret);
3032                         return ret;
3033                 }
3034         }
3035
3036
3037         return 0;
3038 }
3039
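/*
 * Quiesce the GPU entirely: block further execbufs, flush all outstanding
 * rendering, wait (with a crude stuck-detection loop) for the final seqno,
 * move everything off the active and flushing lists, evict the inactive
 * list from the GTT and tear down the ring.  Used when leaving VT and on
 * lastclose.
 */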
3040 int
3041 i915_gem_idle(struct drm_device *dev)
3042 {
3043         drm_i915_private_t *dev_priv = dev->dev_private;
3044         uint32_t seqno, cur_seqno, last_seqno;
3045         int stuck, ret;
3046
3047         mutex_lock(&dev->struct_mutex);
3048
3049         if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3050                 mutex_unlock(&dev->struct_mutex);
3051                 return 0;
3052         }
3053
3054         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3055          * We need to replace this with a semaphore, or something.
3056          */
3057         dev_priv->mm.suspended = 1;
3058
3059         /* Cancel the retire work handler, wait for it to finish if running
3060          */
3061         mutex_unlock(&dev->struct_mutex);
3062         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3063         mutex_lock(&dev->struct_mutex);
3064
3065         i915_kernel_lost_context(dev);
3066
3067         /* Flush the GPU along with all non-CPU write domains
3068          */
3069         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3070                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
3071         seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3072
3073         if (seqno == 0) {
3074                 mutex_unlock(&dev->struct_mutex);
3075                 return -ENOMEM;
3076         }
3077
3078         dev_priv->mm.waiting_gem_seqno = seqno;
3079         last_seqno = 0;
3080         stuck = 0;
3081         for (;;) {
3082                 cur_seqno = i915_get_gem_seqno(dev);
3083                 if (i915_seqno_passed(cur_seqno, seqno))
3084                         break;
3085                 if (last_seqno == cur_seqno) {
3086                         if (stuck++ > 100) {
3087                                 DRM_ERROR("hardware wedged\n");
3088                                 dev_priv->mm.wedged = 1;
3089                                 DRM_WAKEUP(&dev_priv->irq_queue);
3090                                 break;
3091                         }
3092                 }
3093                 msleep(10);
3094                 last_seqno = cur_seqno;
3095         }
3096         dev_priv->mm.waiting_gem_seqno = 0;
3097
3098         i915_gem_retire_requests(dev);
3099
3100         if (!dev_priv->mm.wedged) {
3101                 /* Active and flushing should now be empty as we've
3102                  * waited for a sequence higher than any pending execbuffer
3103                  */
3104                 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3105                 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3106                 /* Request should now be empty as we've also waited
3107                  * for the last request in the list
3108                  */
3109                 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3110         }
3111
3112         /* Empty the active and flushing lists to inactive.  If there's
3113          * anything left at this point, it means that we're wedged and
3114          * nothing good's going to happen by leaving them there.  So strip
3115          * the GPU domains and just stuff them onto inactive.
3116          */
3117         while (!list_empty(&dev_priv->mm.active_list)) {
3118                 struct drm_i915_gem_object *obj_priv;
3119
3120                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3121                                             struct drm_i915_gem_object,
3122                                             list);
3123                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3124                 i915_gem_object_move_to_inactive(obj_priv->obj);
3125         }
3126
3127         while (!list_empty(&dev_priv->mm.flushing_list)) {
3128                 struct drm_i915_gem_object *obj_priv;
3129
3130                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
3131                                             struct drm_i915_gem_object,
3132                                             list);
3133                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3134                 i915_gem_object_move_to_inactive(obj_priv->obj);
3135         }
3136
3137
3138         /* Move all inactive buffers out of the GTT. */
3139         ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3140         WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3141         if (ret) {
3142                 mutex_unlock(&dev->struct_mutex);
3143                 return ret;
3144         }
3145
3146         i915_gem_cleanup_ringbuffer(dev);
3147         mutex_unlock(&dev->struct_mutex);
3148
3149         return 0;
3150 }
3151
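/*
 * Set up the hardware status page: a page of cacheable memory into which
 * the hardware and ring commands write status and the driver's
 * sequence-number breadcrumbs.  Chipsets that need a GTT-relative status
 * page (I915_NEED_GFX_HWS) get a pinned GEM object here; older parts use
 * the physical page allocated at driver load time.
 */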
3152 static int
3153 i915_gem_init_hws(struct drm_device *dev)
3154 {
3155         drm_i915_private_t *dev_priv = dev->dev_private;
3156         struct drm_gem_object *obj;
3157         struct drm_i915_gem_object *obj_priv;
3158         int ret;
3159
3160         /* If we need a physical address for the status page, it's already
3161          * initialized at driver load time.
3162          */
3163         if (!I915_NEED_GFX_HWS(dev))
3164                 return 0;
3165
3166         obj = drm_gem_object_alloc(dev, 4096);
3167         if (obj == NULL) {
3168                 DRM_ERROR("Failed to allocate status page\n");
3169                 return -ENOMEM;
3170         }
3171         obj_priv = obj->driver_private;
3172         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
3173
3174         ret = i915_gem_object_pin(obj, 4096);
3175         if (ret != 0) {
3176                 drm_gem_object_unreference(obj);
3177                 return ret;
3178         }
3179
3180         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3181
3182         dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
3183         if (dev_priv->hw_status_page == NULL) {
3184                 DRM_ERROR("Failed to map status page.\n");
3185                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3186                 i915_gem_object_unpin(obj);
3187                 drm_gem_object_unreference(obj);
3188                 return -EINVAL;
3189         }
3190         dev_priv->hws_obj = obj;
3191         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3192         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
3193         I915_READ(HWS_PGA); /* posting read */
3194         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3195
3196         return 0;
3197 }
3198
3199 static void
3200 i915_gem_cleanup_hws(struct drm_device *dev)
3201 {
3202         drm_i915_private_t *dev_priv = dev->dev_private;
3203         struct drm_gem_object *obj;
3204         struct drm_i915_gem_object *obj_priv;
3205
3206         if (dev_priv->hws_obj == NULL)
3207                 return;
3208
3209         obj = dev_priv->hws_obj;
3210         obj_priv = obj->driver_private;
3211
3212         kunmap(obj_priv->page_list[0]);
3213         i915_gem_object_unpin(obj);
3214         drm_gem_object_unreference(obj);
3215         dev_priv->hws_obj = NULL;
3216
3217         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3218         dev_priv->hw_status_page = NULL;
3219
3220         /* Write high address into HWS_PGA when disabling. */
3221         I915_WRITE(HWS_PGA, 0x1ffff000);
3222 }
3223
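/*
 * Allocate and start the render ring: a 128KB GEM object is pinned into
 * the GTT, mapped write-combined through the aperture for the CPU, and
 * the PRB0 ring registers are programmed to point at it.  Includes a
 * workaround for G45 parts that fail to reset the ring head to zero.
 */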
3224 int
3225 i915_gem_init_ringbuffer(struct drm_device *dev)
3226 {
3227         drm_i915_private_t *dev_priv = dev->dev_private;
3228         struct drm_gem_object *obj;
3229         struct drm_i915_gem_object *obj_priv;
3230         drm_i915_ring_buffer_t *ring = &dev_priv->ring;
3231         int ret;
3232         u32 head;
3233
3234         ret = i915_gem_init_hws(dev);
3235         if (ret != 0)
3236                 return ret;
3237
3238         obj = drm_gem_object_alloc(dev, 128 * 1024);
3239         if (obj == NULL) {
3240                 DRM_ERROR("Failed to allocate ringbuffer\n");
3241                 i915_gem_cleanup_hws(dev);
3242                 return -ENOMEM;
3243         }
3244         obj_priv = obj->driver_private;
3245
3246         ret = i915_gem_object_pin(obj, 4096);
3247         if (ret != 0) {
3248                 drm_gem_object_unreference(obj);
3249                 i915_gem_cleanup_hws(dev);
3250                 return ret;
3251         }
3252
3253         /* Set up the kernel mapping for the ring. */
3254         ring->Size = obj->size;
3255         ring->tail_mask = obj->size - 1;
3256
3257         ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3258         ring->map.size = obj->size;
3259         ring->map.type = 0;
3260         ring->map.flags = 0;
3261         ring->map.mtrr = 0;
3262
3263         drm_core_ioremap_wc(&ring->map, dev);
3264         if (ring->map.handle == NULL) {
3265                 DRM_ERROR("Failed to map ringbuffer.\n");
3266                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3267                 i915_gem_object_unpin(obj);
3268                 drm_gem_object_unreference(obj);
3269                 i915_gem_cleanup_hws(dev);
3270                 return -EINVAL;
3271         }
3272         ring->ring_obj = obj;
3273         ring->virtual_start = ring->map.handle;
3274
3275         /* Stop the ring if it's running. */
3276         I915_WRITE(PRB0_CTL, 0);
3277         I915_WRITE(PRB0_TAIL, 0);
3278         I915_WRITE(PRB0_HEAD, 0);
3279
3280         /* Initialize the ring. */
3281         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
3282         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3283
3284         /* G45 ring initialization fails to reset head to zero */
3285         if (head != 0) {
3286                 DRM_ERROR("Ring head not reset to zero "
3287                           "ctl %08x head %08x tail %08x start %08x\n",
3288                           I915_READ(PRB0_CTL),
3289                           I915_READ(PRB0_HEAD),
3290                           I915_READ(PRB0_TAIL),
3291                           I915_READ(PRB0_START));
3292                 I915_WRITE(PRB0_HEAD, 0);
3293
3294                 DRM_ERROR("Ring head forced to zero "
3295                           "ctl %08x head %08x tail %08x start %08x\n",
3296                           I915_READ(PRB0_CTL),
3297                           I915_READ(PRB0_HEAD),
3298                           I915_READ(PRB0_TAIL),
3299                           I915_READ(PRB0_START));
3300         }
3301
3302         I915_WRITE(PRB0_CTL,
3303                    ((obj->size - 4096) & RING_NR_PAGES) |
3304                    RING_NO_REPORT |
3305                    RING_VALID);
3306
3307         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3308
3309         /* If the head is still not zero, the ring is dead */
3310         if (head != 0) {
3311                 DRM_ERROR("Ring initialization failed "
3312                           "ctl %08x head %08x tail %08x start %08x\n",
3313                           I915_READ(PRB0_CTL),
3314                           I915_READ(PRB0_HEAD),
3315                           I915_READ(PRB0_TAIL),
3316                           I915_READ(PRB0_START));
3317                 return -EIO;
3318         }
3319
3320         /* Update our cache of the ring state */
3321         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3322                 i915_kernel_lost_context(dev);
3323         else {
3324                 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3325                 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
3326                 ring->space = ring->head - (ring->tail + 8);
3327                 if (ring->space < 0)
3328                         ring->space += ring->Size;
3329         }
3330
3331         return 0;
3332 }
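
/*
 * A worked example of the free-space computation above (illustrative
 * numbers): with the 128KB ring, head = 0x100 and tail = 0x2000,
 * space = 0x100 - (0x2000 + 8) = -0x1f08, which wraps to
 * -0x1f08 + 0x20000 = 0x1e0f8 bytes available.  The extra 8 bytes keep
 * the tail from ever catching up to the head, so an empty ring and a
 * completely full ring remain distinguishable.
 */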
3333
3334 void
3335 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3336 {
3337         drm_i915_private_t *dev_priv = dev->dev_private;
3338
3339         if (dev_priv->ring.ring_obj == NULL)
3340                 return;
3341
3342         drm_core_ioremapfree(&dev_priv->ring.map, dev);
3343
3344         i915_gem_object_unpin(dev_priv->ring.ring_obj);
3345         drm_gem_object_unreference(dev_priv->ring.ring_obj);
3346         dev_priv->ring.ring_obj = NULL;
3347         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3348
3349         i915_gem_cleanup_hws(dev);
3350 }
3351
3352 int
3353 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3354                        struct drm_file *file_priv)
3355 {
3356         drm_i915_private_t *dev_priv = dev->dev_private;
3357         int ret;
3358
3359         if (drm_core_check_feature(dev, DRIVER_MODESET))
3360                 return 0;
3361
3362         if (dev_priv->mm.wedged) {
3363                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3364                 dev_priv->mm.wedged = 0;
3365         }
3366
3367         mutex_lock(&dev->struct_mutex);
3368         dev_priv->mm.suspended = 0;
3369
3370         ret = i915_gem_init_ringbuffer(dev);
3371         if (ret != 0) {
3372                 mutex_unlock(&dev->struct_mutex);
3373                 return ret;
3374         }
3373
3374         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3375         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3376         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3377         BUG_ON(!list_empty(&dev_priv->mm.request_list));
3378         mutex_unlock(&dev->struct_mutex);
3379
3380         drm_irq_install(dev);
3381
3382         return 0;
3383 }
3384
3385 int
3386 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3387                        struct drm_file *file_priv)
3388 {
3389         int ret;
3390
3391         if (drm_core_check_feature(dev, DRIVER_MODESET))
3392                 return 0;
3393
3394         ret = i915_gem_idle(dev);
3395         drm_irq_uninstall(dev);
3396
3397         return ret;
3398 }
3399
3400 void
3401 i915_gem_lastclose(struct drm_device *dev)
3402 {
3403         int ret;
3404
3405         if (drm_core_check_feature(dev, DRIVER_MODESET))
3406                 return;
3407
3408         ret = i915_gem_idle(dev);
3409         if (ret)
3410                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3411 }
3412
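/*
 * One-time GEM setup at driver load: initialize the object lists and the
 * retire work handler, start the seqno counter at 1, reserve fence
 * registers 0-2 for old X servers, and detect the bit-6 swizzling mode
 * used for tiled buffers.
 */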
3413 void
3414 i915_gem_load(struct drm_device *dev)
3415 {
3416         drm_i915_private_t *dev_priv = dev->dev_private;
3417
3418         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3419         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3420         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3421         INIT_LIST_HEAD(&dev_priv->mm.request_list);
3422         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3423                           i915_gem_retire_work_handler);
3424         dev_priv->mm.next_gem_seqno = 1;
3425
3426         /* Old X drivers will take 0-2 for front, back, depth buffers */
3427         dev_priv->fence_reg_start = 3;
3428
3429         if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3430                 dev_priv->num_fence_regs = 16;
3431         else
3432                 dev_priv->num_fence_regs = 8;
3433
3434         i915_gem_detect_bit_6_swizzle(dev);
3435 }
3436
3437 /*
3438  * Create a physically contiguous memory object of the requested size,
3439  * e.g. for cursor and overlay registers that must be addressed physically.
3440  */
3441 int i915_gem_init_phys_object(struct drm_device *dev,
3442                               int id, int size)
3443 {
3444         drm_i915_private_t *dev_priv = dev->dev_private;
3445         struct drm_i915_gem_phys_object *phys_obj;
3446         int ret;
3447
3448         if (dev_priv->mm.phys_objs[id - 1] || !size)
3449                 return 0;
3450
3451         phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3452         if (!phys_obj)
3453                 return -ENOMEM;
3454
3455         phys_obj->id = id;
3456
3457         phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
3458         if (!phys_obj->handle) {
3459                 ret = -ENOMEM;
3460                 goto kfree_obj;
3461         }
3462 #ifdef CONFIG_X86
3463         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3464 #endif
3465
3466         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3467
3468         return 0;
3469 kfree_obj:
3470         drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3471         return ret;
3472 }
3473
3474 void i915_gem_free_phys_object(struct drm_device *dev, int id)
3475 {
3476         drm_i915_private_t *dev_priv = dev->dev_private;
3477         struct drm_i915_gem_phys_object *phys_obj;
3478
3479         if (!dev_priv->mm.phys_objs[id - 1])
3480                 return;
3481
3482         phys_obj = dev_priv->mm.phys_objs[id - 1];
3483         if (phys_obj->cur_obj) {
3484                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3485         }
3486
3487 #ifdef CONFIG_X86
3488         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3489 #endif
3490         drm_pci_free(dev, phys_obj->handle);
3491         drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3492         dev_priv->mm.phys_objs[id - 1] = NULL;
3493 }
3494
3495 void i915_gem_free_all_phys_object(struct drm_device *dev)
3496 {
3497         int i;
3498
3499         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3500                 i915_gem_free_phys_object(dev, i);
3501 }
3502
3503 void i915_gem_detach_phys_object(struct drm_device *dev,
3504                                  struct drm_gem_object *obj)
3505 {
3506         struct drm_i915_gem_object *obj_priv;
3507         int i;
3508         int ret;
3509         int page_count;
3510
3511         obj_priv = obj->driver_private;
3512         if (!obj_priv->phys_obj)
3513                 return;
3514
3515         ret = i915_gem_object_get_page_list(obj);
3516         if (ret)
3517                 goto out;
3518
3519         page_count = obj->size / PAGE_SIZE;
3520
3521         for (i = 0; i < page_count; i++) {
3522                 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
3523                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3524
3525                 memcpy(dst, src, PAGE_SIZE);
3526                 kunmap_atomic(dst, KM_USER0);
3527         }
3528         drm_clflush_pages(obj_priv->page_list, page_count);
3529         drm_agp_chipset_flush(dev);
3530 out:
3531         obj_priv->phys_obj->cur_obj = NULL;
3532         obj_priv->phys_obj = NULL;
3533 }
3534
3535 int
3536 i915_gem_attach_phys_object(struct drm_device *dev,
3537                             struct drm_gem_object *obj, int id)
3538 {
3539         drm_i915_private_t *dev_priv = dev->dev_private;
3540         struct drm_i915_gem_object *obj_priv;
3541         int ret = 0;
3542         int page_count;
3543         int i;
3544
3545         if (id > I915_MAX_PHYS_OBJECT)
3546                 return -EINVAL;
3547
3548         obj_priv = obj->driver_private;
3549
3550         if (obj_priv->phys_obj) {
3551                 if (obj_priv->phys_obj->id == id)
3552                         return 0;
3553                 i915_gem_detach_phys_object(dev, obj);
3554         }
3555
3556
3557         /* create a new object */
3558         if (!dev_priv->mm.phys_objs[id - 1]) {
3559                 ret = i915_gem_init_phys_object(dev, id,
3560                                                 obj->size);
3561                 if (ret) {
3562                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
3563                         goto out;
3564                 }
3565         }
3566
3567         /* bind to the object */
3568         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3569         obj_priv->phys_obj->cur_obj = obj;
3570
3571         ret = i915_gem_object_get_page_list(obj);
3572         if (ret) {
3573                 DRM_ERROR("failed to get page list\n");
3574                 goto out;
3575         }
3576
3577         page_count = obj->size / PAGE_SIZE;
3578
3579         for (i = 0; i < page_count; i++) {
3580                 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
3581                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3582
3583                 memcpy(dst, src, PAGE_SIZE);
3584                 kunmap_atomic(src, KM_USER0);
3585         }
3586
3587         return 0;
3588 out:
3589         return ret;
3590 }
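
/*
 * Illustrative sketch of the intended caller (hypothetical names): KMS
 * cursor code on chipsets whose cursor registers take a physical rather
 * than a GTT address would attach the cursor BO to a phys object and
 * program the resulting bus address:
 *
 *	ret = i915_gem_attach_phys_object(dev, cursor_bo,
 *					  I915_GEM_PHYS_CURSOR_0 + pipe);
 *	if (ret == 0)
 *		cursor_addr = obj_priv->phys_obj->handle->busaddr;
 */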
3591
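/*
 * pwrite fast path for objects backed by a phys object: the data is
 * copied straight into the contiguous allocation and the chipset is
 * flushed, since the hardware reads these pages by physical address.
 */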
3592 static int
3593 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
3594                      struct drm_i915_gem_pwrite *args,
3595                      struct drm_file *file_priv)
3596 {
3597         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3598         void *obj_addr;
3599         int ret;
3600         char __user *user_data;
3601
3602         user_data = (char __user *) (uintptr_t) args->data_ptr;
3603         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
3604
3605         DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, (long long)args->size);
3606         ret = copy_from_user(obj_addr, user_data, args->size);
3607         if (ret)
3608                 return -EFAULT;
3609
3610         drm_agp_chipset_flush(dev);
3611         return 0;
3612 }