drm/i915: Make a single set-to-cpu-domain path and use it wherever needed.
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>

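/* The GPU cache domains: every domain other than the CPU and GTT
 * domains (render, sampler, command, instruction, vertex).
 */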
#define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                           uint32_t read_domains,
                           uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                     uint64_t offset,
                                                     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);

static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        mutex_lock(&dev->struct_mutex);

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
            (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
            args->gtt_end - args->gtt_start);

        dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_gem_get_aperture *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = (args->aper_size -
                                     atomic_read(&dev->pin_memory));

        return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        ssize_t read;
        loff_t offset;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                        args->size, &offset);
        if (read != args->size) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                if (read < 0)
                        return read;
                else
                        return -EINVAL;
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char __iomem *vaddr;
        unsigned long unwritten;

        vaddr = io_mapping_map_wc(mapping, page_base);
        if (vaddr == NULL)
                return -EFAULT;
        unwritten = __copy_from_user(vaddr + page_offset,
                                     user_data, length);
        io_mapping_unmap(vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
}

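/**
 * Writes user data into the object through the write-combined GTT
 * aperture.  Tries the atomic fast path first and falls back to the
 * sleeping slow path when the source page isn't resident.
 */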
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                    struct drm_i915_gem_pwrite *args,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto fail;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                      page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. In this case, use the
                 * non-atomic function
                 */
                if (ret) {
                        ret = slow_user_write(dev_priv->mm.gtt_mapping,
                                              page_base, page_offset,
                                              user_data, page_length);
                        if (ret)
                                goto fail;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

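/**
 * Writes user data into the object's shmem backing store via the CPU,
 * after moving the object to the CPU write domain.
 */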
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
{
        int ret;
        loff_t offset;
        ssize_t written;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        written = vfs_write(obj->filp,
                            (char __user *)(uintptr_t) args->data_ptr,
                            args->size, &offset);
        if (written != args->size) {
                mutex_unlock(&dev->struct_mutex);
                if (written < 0)
                        return written;
                else
                        return -EINVAL;
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->tiling_mode == I915_TILING_NONE &&
            dev->gtt_total != 0)
                ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
        else
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference(obj);

        return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
                return -EINVAL;

        if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %d)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = obj->driver_private;

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}

static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;

        if (obj_priv->page_list == NULL)
                return;

        for (i = 0; i < page_count; i++)
                if (obj_priv->page_list[i] != NULL) {
                        if (obj_priv->dirty)
                                set_page_dirty(obj_priv->page_list[i]);
                        mark_page_accessed(obj_priv->page_list[i]);
                        page_cache_release(obj_priv->page_list[i]);
                }
        obj_priv->dirty = 0;

        drm_free(obj_priv->page_list,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
        obj_priv->page_list = NULL;
}

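/**
 * Moves the object to the tail of the GPU's active list and records the
 * seqno it must wait for.  Objects on the active list hold an extra
 * reference until they return to the inactive list.
 */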
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
        obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        BUG_ON(!obj_priv->active);
        list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
        obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
                list_del_init(&obj_priv->list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        obj_priv->last_rendering_seqno = 0;
        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
        RING_LOCALS;

        request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
        if (request == NULL)
                return 0;

        /* Grab the seqno we're going to make this request be, and bump the
         * next (skipping 0 so it can be the reserved no-seqno value).
         */
        seqno = dev_priv->mm.next_gem_seqno;
        dev_priv->mm.next_gem_seqno++;
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(seqno);

        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        DRM_DEBUG("%d\n", seqno);

        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&dev_priv->mm.request_list);
        list_add_tail(&request->list, &dev_priv->mm.request_list);

        /* Associate any objects on the flushing list matching the write
         * domain we're flushing with our flush.
         */
        if (flush_domains != 0) {
                struct drm_i915_gem_object *obj_priv, *next;

                list_for_each_entry_safe(obj_priv, next,
                                         &dev_priv->mm.flushing_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        if ((obj->write_domain & flush_domains) ==
                            obj->write_domain) {
                                obj->write_domain = 0;
                                i915_gem_object_move_to_active(obj, seqno);
                        }
                }
        }

        if (was_empty && !dev_priv->mm.suspended)
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
        RING_LOCALS;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
        BEGIN_LP_RING(2);
        OUT_RING(cmd);
        OUT_RING(0); /* noop */
        ADVANCE_LP_RING();
        return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
                        struct drm_i915_gem_request *request)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = obj_priv->obj;

                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
                 * this seqno.
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)
                        return;
#if WATCH_LRU
                DRM_INFO("%s: retire %d moves to inactive list %p\n",
                         __func__, request->seqno, obj);
#endif

                if (obj->write_domain != 0)
                        i915_gem_object_move_to_flushing(obj);
                else
                        i915_gem_object_move_to_inactive(obj);
        }
}

/**
 * Returns true if seq1 is later than seq2.
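 * The comparison uses signed 32-bit arithmetic, so it remains correct
 * across seqno wraparound: e.g. i915_seqno_passed(1, 0xffffffff) is
 * true, since (int32_t)(1 - 0xffffffff) == 2.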
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

        seqno = i915_get_gem_seqno(dev);

        while (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
                    dev_priv->mm.wedged) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
                        drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
                } else
                        break;
        }
}

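/**
 * Delayed-work handler that retires completed requests roughly once a
 * second for as long as requests remain outstanding and the device is
 * not suspended.
 */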
void
i915_gem_retire_work_handler(struct work_struct *work)
{
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;

        dev_priv = container_of(work, drm_i915_private_t,
                                mm.retire_work.work);
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;

        BUG_ON(seqno == 0);

        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
                                               dev_priv->mm.wedged);
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
        if (dev_priv->mm.wedged)
                ret = -EIO;

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                          __func__, ret, seqno, i915_get_gem_seqno(dev));

        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
        if (ret == 0)
                i915_gem_retire_requests(dev);

        return ret;
}

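/**
 * Emits the flushes/invalidates needed for the given domain transition:
 * a chipset flush when the CPU domain is flushed, and an MI_FLUSH on
 * the ring when any GPU cache domain is involved.
 */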
static void
i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd;
        RING_LOCALS;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

        if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
                                                     I915_GEM_DOMAIN_GTT)) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                BEGIN_LP_RING(2);
                OUT_RING(cmd);
                OUT_RING(0); /* noop */
                ADVANCE_LP_RING();
        }
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;

        /* This function only exists to support waiting for existing rendering,
         * not for emitting required flushes.
         */
        BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
        if (obj_priv->active) {
#if WATCH_BUF
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
#endif
                ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret = 0;

#if WATCH_BUF
        DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
        DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
        if (obj_priv->gtt_space == NULL)
                return 0;

        if (obj_priv->pin_count != 0) {
                DRM_ERROR("Attempting to unbind pinned buffer\n");
                return -EINVAL;
        }

        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
         * also ensure that all pending GPU writes are finished
         * before we unbind.
         */
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("set_domain failed: %d\n", ret);
                return ret;
        }

        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }

        BUG_ON(obj_priv->active);

        i915_gem_object_free_page_list(obj);

        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
                atomic_sub(obj->size, &dev->gtt_memory);

                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
        }

        /* Remove ourselves from the LRU list if present. */
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);

        return 0;
}

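/**
 * Makes room in the GTT by evicting one buffer: unbinds the first
 * inactive buffer if one exists, otherwise waits on the oldest request
 * (or emits a flush for the flushing list) and retries.
 */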
static int
i915_gem_evict_something(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        for (;;) {
                /* If there's an inactive buffer available now, grab it
                 * and be done.
                 */
                if (!list_empty(&dev_priv->mm.inactive_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;
                        BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
                        BUG_ON(obj_priv->active);

                        /* Wait on the rendering and unbind the buffer. */
                        ret = i915_gem_object_unbind(obj);
                        break;
                }

                /* If we didn't get anything, but the ring is still processing
                 * things, wait for one of those things to finish and hopefully
                 * leave us a buffer to evict.
                 */
                if (!list_empty(&dev_priv->mm.request_list)) {
                        struct drm_i915_gem_request *request;

                        request = list_first_entry(&dev_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   list);

                        ret = i915_wait_request(dev, request->seqno);
                        if (ret)
                                break;

                        /* if waiting caused an object to become inactive,
                         * then loop around and wait for it. Otherwise, we
                         * assume that waiting freed and unbound something,
                         * so there should now be some space in the GTT
                         */
                        if (!list_empty(&dev_priv->mm.inactive_list))
                                continue;
                        break;
                }

                /* If we didn't have anything on the request list but there
                 * are buffers awaiting a flush, emit one and try again.
                 * When we wait on it, those buffers waiting for that flush
                 * will get moved to inactive.
                 */
                if (!list_empty(&dev_priv->mm.flushing_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;

                        i915_gem_flush(dev,
                                       obj->write_domain,
                                       obj->write_domain);
                        i915_add_request(dev, obj->write_domain);

                        obj = NULL;
                        continue;
                }

                DRM_ERROR("inactive empty %d request empty %d "
                          "flushing empty %d\n",
                          list_empty(&dev_priv->mm.inactive_list),
                          list_empty(&dev_priv->mm.request_list),
                          list_empty(&dev_priv->mm.flushing_list));
                /* If we didn't do any of the above, there's nothing to be done
                 * and we just can't fit it in.
                 */
                return -ENOMEM;
        }
        return ret;
}

static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
        int ret;

        if (obj_priv->page_list)
                return 0;

        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
        BUG_ON(obj_priv->page_list != NULL);
        obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
                                         DRM_MEM_DRIVER);
        if (obj_priv->page_list == NULL) {
                DRM_ERROR("Failed to allocate page list\n");
                return -ENOMEM;
        }

        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_mapping_page(mapping, i, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
                        i915_gem_object_free_page_list(obj);
                        return ret;
                }
                obj_priv->page_list[i] = page;
        }
        return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
        int page_count, ret;

        if (alignment == 0)
                alignment = PAGE_SIZE;
        if (alignment & (PAGE_SIZE - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }

 search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
        if (free_space != NULL) {
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                                       alignment);
                if (obj_priv->gtt_space != NULL) {
                        obj_priv->gtt_space->private = obj;
                        obj_priv->gtt_offset = obj_priv->gtt_space->start;
                }
        }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
#if WATCH_LRU
                DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
                if (list_empty(&dev_priv->mm.inactive_list) &&
                    list_empty(&dev_priv->mm.flushing_list) &&
                    list_empty(&dev_priv->mm.active_list)) {
                        DRM_ERROR("GTT full, but LRU list empty\n");
                        return -ENOMEM;
                }

                ret = i915_gem_evict_something(dev);
                if (ret != 0) {
                        DRM_ERROR("Failed to evict a buffer %d\n", ret);
                        return ret;
                }
                goto search_free;
        }

#if WATCH_BUF
        DRM_INFO("Binding object of size %d at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
#endif
        ret = i915_gem_object_get_page_list(obj);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return ret;
        }

        page_count = obj->size / PAGE_SIZE;
        /* Create an AGP memory structure pointing at our pages, and bind it
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->page_list,
                                               page_count,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_free_page_list(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return -ENOMEM;
        }
        atomic_inc(&dev->gtt_count);
        atomic_add(obj->size, &dev->gtt_memory);

        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
        BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

        return 0;
}

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object      *obj_priv = obj->driver_private;

        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj_priv->page_list == NULL)
                return;

        drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        uint32_t seqno;

        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return;

        /* Queue the GPU write cache flushing we need. */
        i915_gem_flush(dev, 0, obj->write_domain);
        seqno = i915_add_request(dev, obj->write_domain);
        obj->write_domain = 0;
        i915_gem_object_move_to_active(obj, seqno);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
        if (obj->write_domain != I915_GEM_DOMAIN_GTT)
                return;

        /* No actual flushing is required for the GTT write domain.   Writes
         * to it immediately go to main memory as far as we know, so there's
         * no chipset flush.  It also doesn't land in render cache.
         */
        obj->write_domain = 0;
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        if (obj->write_domain != I915_GEM_DOMAIN_CPU)
                return;

        i915_gem_clflush_object(obj);
        drm_agp_chipset_flush(dev);
        obj->write_domain = 0;
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;

        i915_gem_object_flush_gpu_write_domain(obj);
        /* Wait on any GPU rendering and flushing to occur. */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret != 0)
                return ret;

        /* If we're writing through the GTT domain, then CPU and GPU caches
         * will need to be invalidated at next use.
         */
        if (write)
                obj->read_domains &= I915_GEM_DOMAIN_GTT;

        i915_gem_object_flush_cpu_write_domain(obj);

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj_priv->dirty = 1;
        }

        return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
        struct drm_device *dev = obj->dev;
        int ret;

        i915_gem_object_flush_gpu_write_domain(obj);
        /* Wait on any GPU rendering and flushing to occur. */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret != 0)
                return ret;

        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we have a partially-valid cache of the object in the CPU,
         * finish invalidating it and free the per-page flags.
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);

        /* Flush the CPU cache if it's still invalid. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj);
                drm_agp_chipset_flush(dev);

                obj->read_domains |= I915_GEM_DOMAIN_CPU;
        }

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

        /* If we're writing through the CPU, then the GPU read domains will
         * need to be invalidated at next use.
         */
        if (write) {
                obj->read_domains &= I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }

        return 0;
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 * pair of read and write domain masks.
1380  *
1381  * Case 1: the batch buffer
1382  *
1383  *      1. Allocated
1384  *      2. Written by CPU
1385  *      3. Mapped to GTT
1386  *      4. Read by GPU
1387  *      5. Unmapped from GTT
1388  *      6. Freed
1389  *
1390  *      Let's take these a step at a time
1391  *
1392  *      1. Allocated
1393  *              Pages allocated from the kernel may still have
1394  *              cache contents, so we set them to (CPU, CPU) always.
1395  *      2. Written by CPU (using pwrite)
1396  *              The pwrite function calls set_domain (CPU, CPU) and
1397  *              this function does nothing (as nothing changes)
1398  *      3. Mapped by GTT
1399  *              This function asserts that the object is not
1400  *              currently in any GPU-based read or write domains
1401  *      4. Read by GPU
1402  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
1403  *              As write_domain is zero, this function adds in the
1404  *              current read domains (CPU+COMMAND, 0).
1405  *              flush_domains is set to CPU.
1406  *              invalidate_domains is set to COMMAND
1407  *              clflush is run to get data out of the CPU caches
1408  *              then i915_dev_set_domain calls i915_gem_flush to
1409  *              emit an MI_FLUSH and drm_agp_chipset_flush
1410  *      5. Unmapped from GTT
1411  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
1412  *              flush_domains and invalidate_domains end up both zero
1413  *              so no flushing/invalidating happens
1414  *      6. Freed
1415  *              yay, done
1416  *
1417  * Case 2: The shared render buffer
1418  *
1419  *      1. Allocated
1420  *      2. Mapped to GTT
1421  *      3. Read/written by GPU
1422  *      4. set_domain to (CPU,CPU)
1423  *      5. Read/written by CPU
1424  *      6. Read/written by GPU
1425  *
1426  *      1. Allocated
1427  *              Same as last example, (CPU, CPU)
1428  *      2. Mapped to GTT
1429  *              Nothing changes (assertions find that it is not in the GPU)
1430  *      3. Read/written by GPU
1431  *              execbuffer calls set_domain (RENDER, RENDER)
1432  *              flush_domains gets CPU
1433  *              invalidate_domains gets GPU
1434  *              clflush (obj)
1435  *              MI_FLUSH and drm_agp_chipset_flush
1436  *      4. set_domain (CPU, CPU)
1437  *              flush_domains gets GPU
1438  *              invalidate_domains gets CPU
1439  *              wait_rendering (obj) to make sure all drawing is complete.
1440  *              This will include an MI_FLUSH to get the data from GPU
1441  *              to memory
1442  *              clflush (obj) to invalidate the CPU cache
1443  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1444  *      5. Read/written by CPU
1445  *              cache lines are loaded and dirtied
1446  *      6. Read written by GPU
1447  *              Same as last GPU access
1448  *
1449  * Case 3: The constant buffer
1450  *
1451  *      1. Allocated
1452  *      2. Written by CPU
1453  *      3. Read by GPU
1454  *      4. Updated (written) by CPU again
1455  *      5. Read by GPU
1456  *
1457  *      1. Allocated
1458  *              (CPU, CPU)
1459  *      2. Written by CPU
1460  *              (CPU, CPU)
1461  *      3. Read by GPU
1462  *              (CPU+RENDER, 0)
1463  *              flush_domains = CPU
1464  *              invalidate_domains = RENDER
1465  *              clflush (obj)
1466  *              MI_FLUSH
1467  *              drm_agp_chipset_flush
1468  *      4. Updated (written) by CPU again
1469  *              (CPU, CPU)
1470  *              flush_domains = 0 (no previous write domain)
1471  *              invalidate_domains = 0 (no new read domains)
1472  *      5. Read by GPU
1473  *              (CPU+RENDER, 0)
1474  *              flush_domains = CPU
1475  *              invalidate_domains = RENDER
1476  *              clflush (obj)
1477  *              MI_FLUSH
1478  *              drm_agp_chipset_flush
1479  */
1480 static int
1481 i915_gem_object_set_domain(struct drm_gem_object *obj,
1482                             uint32_t read_domains,
1483                             uint32_t write_domain)
1484 {
1485         struct drm_device               *dev = obj->dev;
1486         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1487         uint32_t                        invalidate_domains = 0;
1488         uint32_t                        flush_domains = 0;
1489
1490         BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
1491         BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
1492
1493 #if WATCH_BUF
1494         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1495                  __func__, obj,
1496                  obj->read_domains, read_domains,
1497                  obj->write_domain, write_domain);
1498 #endif
1499         /*
1500          * If the object isn't moving to a new write domain,
1501          * let the object stay in multiple read domains
1502          */
1503         if (write_domain == 0)
1504                 read_domains |= obj->read_domains;
1505         else
1506                 obj_priv->dirty = 1;
1507
1508         /*
1509          * Flush the current write domain if
1510          * the new read domains don't match. Invalidate
1511          * any read domains which differ from the old
1512          * write domain
1513          */
1514         if (obj->write_domain && obj->write_domain != read_domains) {
1515                 flush_domains |= obj->write_domain;
1516                 invalidate_domains |= read_domains & ~obj->write_domain;
1517         }
1518         /*
1519          * Invalidate any read caches which may have
1520          * stale data. That is, any new read domains.
1521          */
1522         invalidate_domains |= read_domains & ~obj->read_domains;
1523         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1524 #if WATCH_BUF
1525                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1526                          __func__, flush_domains, invalidate_domains);
1527 #endif
1528                 i915_gem_clflush_object(obj);
1529         }
1530
1531         if ((write_domain | flush_domains) != 0)
1532                 obj->write_domain = write_domain;
1533         obj->read_domains = read_domains;
1534
1535         dev->invalidate_domains |= invalidate_domains;
1536         dev->flush_domains |= flush_domains;
1537 #if WATCH_BUF
1538         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1539                  __func__,
1540                  obj->read_domains, obj->write_domain,
1541                  dev->invalidate_domains, dev->flush_domains);
1542 #endif
1543         return 0;
1544 }
1545
1546 /**
1547  * Moves the object from a partial CPU read domain to a fully valid one.
1548  *
1549  * Note that this only undoes i915_gem_object_set_cpu_read_domain_range(),
1550  * and doesn't handle a transition from !(read_domains & I915_GEM_DOMAIN_CPU).
1551  */
1552 static void
1553 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
1554 {
1555         struct drm_device *dev = obj->dev;
1556         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1557
1558         if (!obj_priv->page_cpu_valid)
1559                 return;
1560
1561         /* If we're partially in the CPU read domain, finish moving it in.
1562          */
1563         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
1564                 int i;
1565
1566                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
1567                         if (obj_priv->page_cpu_valid[i])
1568                                 continue;
1569                         drm_clflush_pages(obj_priv->page_list + i, 1);
1570                 }
1571                 drm_agp_chipset_flush(dev);
1572         }
1573
1574         /* Free the page_cpu_valid mappings which are now stale, whether
1575          * or not we've got I915_GEM_DOMAIN_CPU.
1576          */
1577         drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1578                  DRM_MEM_DRIVER);
1579         obj_priv->page_cpu_valid = NULL;
1580 }
1581
1582 /**
1583  * Set the CPU read domain on a range of the object.
1584  *
1585  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags even though
1586  * it's not entirely valid.  The object's page_cpu_valid array tracks which
1587  * pages have already been flushed, and is respected by
1588  * i915_gem_object_set_to_cpu_domain() if it's later called to make the whole
1589  * object valid.
1590  *
1591  * This function returns when the move is complete, including waiting on
1592  * flushes to occur.
1593  */
1594 static int
1595 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
1596                                           uint64_t offset, uint64_t size)
1597 {
1598         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1599         int i, ret;
1600
1601         if (offset == 0 && size == obj->size)
1602                 return i915_gem_object_set_to_cpu_domain(obj, 0);
1603
1604         i915_gem_object_flush_gpu_write_domain(obj);
1605         /* Wait on any GPU rendering and flushing to occur. */
1606         ret = i915_gem_object_wait_rendering(obj);
1607         if (ret != 0)
1608                 return ret;
1609         i915_gem_object_flush_gtt_write_domain(obj);
1610
1611         /* If we're already fully in the CPU read domain, we're done. */
1612         if (obj_priv->page_cpu_valid == NULL &&
1613             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
1614                 return 0;
1615
1616         /* Otherwise, create/clear the per-page CPU read domain flag if we're
1617          * newly adding I915_GEM_DOMAIN_CPU
1618          */
1619         if (obj_priv->page_cpu_valid == NULL) {
1620                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1621                                                       DRM_MEM_DRIVER);
1622                 if (obj_priv->page_cpu_valid == NULL)
1623                         return -ENOMEM;
1624         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
1625                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1626
1627         /* Flush the cache on any pages that are still invalid from the CPU's
1628          * perspective.
1629          */
1630         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1631              i++) {
1632                 if (obj_priv->page_cpu_valid[i])
1633                         continue;
1634
1635                 drm_clflush_pages(obj_priv->page_list + i, 1);
1636
1637                 obj_priv->page_cpu_valid[i] = 1;
1638         }
1639
1640         /* It should now be out of any other write domains, and we can update
1641          * the domain values for our changes.
1642          */
1643         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1644
1645         obj->read_domains |= I915_GEM_DOMAIN_CPU;
1646
1647         return 0;
1648 }
1649
1650 /**
1651  * Once all of the objects have been set in the proper domain,
1652  * perform the necessary flush and invalidate operations.
1653  *
1654  * Returns the write domains flushed, for use in flush tracking.
1655  */
1656 static uint32_t
1657 i915_gem_dev_set_domain(struct drm_device *dev)
1658 {
1659         uint32_t flush_domains = dev->flush_domains;
1660
1661         /*
1662          * Now that all the buffers are synced to the proper domains,
1663          * flush and invalidate the collected domains
1664          */
1665         if (dev->invalidate_domains | dev->flush_domains) {
1666 #if WATCH_EXEC
1667                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1668                          __func__,
1669                          dev->invalidate_domains,
1670                          dev->flush_domains);
1671 #endif
1672                 i915_gem_flush(dev,
1673                                dev->invalidate_domains,
1674                                dev->flush_domains);
1675                 dev->invalidate_domains = 0;
1676                 dev->flush_domains = 0;
1677         }
1678
1679         return flush_domains;
1680 }
1681
1682 /**
1683  * Pin an object to the GTT and evaluate the relocations landing in it.
1684  */
1685 static int
1686 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1687                                  struct drm_file *file_priv,
1688                                  struct drm_i915_gem_exec_object *entry)
1689 {
1690         struct drm_device *dev = obj->dev;
1691         drm_i915_private_t *dev_priv = dev->dev_private;
1692         struct drm_i915_gem_relocation_entry reloc;
1693         struct drm_i915_gem_relocation_entry __user *relocs;
1694         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1695         int i, ret;
1696         void __iomem *reloc_page;
1697
1698         /* Choose the GTT offset for our buffer and put it there. */
1699         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1700         if (ret)
1701                 return ret;
1702
1703         entry->offset = obj_priv->gtt_offset;
1704
1705         relocs = (struct drm_i915_gem_relocation_entry __user *)
1706                  (uintptr_t) entry->relocs_ptr;
1707         /* Apply the relocations, using the GTT aperture to avoid cache
1708          * flushing requirements.
1709          */
1710         for (i = 0; i < entry->relocation_count; i++) {
1711                 struct drm_gem_object *target_obj;
1712                 struct drm_i915_gem_object *target_obj_priv;
1713                 uint32_t reloc_val, reloc_offset;
1714                 uint32_t __iomem *reloc_entry;
1715
1716                 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1717                 if (ret != 0) {
1718                         i915_gem_object_unpin(obj);
1719                         return -EFAULT;
1720                 }
1721
1722                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1723                                                    reloc.target_handle);
1724                 if (target_obj == NULL) {
1725                         i915_gem_object_unpin(obj);
1726                         return -EBADF;
1727                 }
1728                 target_obj_priv = target_obj->driver_private;
1729
1730                 /* The target buffer should have appeared before us in the
1731                  * exec_object list, so it should have a GTT space bound by now.
1732                  */
1733                 if (target_obj_priv->gtt_space == NULL) {
1734                         DRM_ERROR("No GTT space found for object %d\n",
1735                                   reloc.target_handle);
1736                         drm_gem_object_unreference(target_obj);
1737                         i915_gem_object_unpin(obj);
1738                         return -EINVAL;
1739                 }
1740
1741                 if (reloc.offset > obj->size - 4) {
1742                         DRM_ERROR("Relocation beyond object bounds: "
1743                                   "obj %p target %d offset %d size %d.\n",
1744                                   obj, reloc.target_handle,
1745                                   (int) reloc.offset, (int) obj->size);
1746                         drm_gem_object_unreference(target_obj);
1747                         i915_gem_object_unpin(obj);
1748                         return -EINVAL;
1749                 }
1750                 if (reloc.offset & 3) {
1751                         DRM_ERROR("Relocation not 4-byte aligned: "
1752                                   "obj %p target %d offset %d.\n",
1753                                   obj, reloc.target_handle,
1754                                   (int) reloc.offset);
1755                         drm_gem_object_unreference(target_obj);
1756                         i915_gem_object_unpin(obj);
1757                         return -EINVAL;
1758                 }
1759
1760                 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
1761                     reloc.read_domains & I915_GEM_DOMAIN_CPU) {
1762                         DRM_ERROR("reloc with read/write CPU domains: "
1763                                   "obj %p target %d offset %d "
1764                                   "read %08x write %08x",
1765                                   obj, reloc.target_handle,
1766                                   (int) reloc.offset,
1767                                   reloc.read_domains,
1768                                   reloc.write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
1769                         return -EINVAL;
1770                 }
1771
1772                 if (reloc.write_domain && target_obj->pending_write_domain &&
1773                     reloc.write_domain != target_obj->pending_write_domain) {
1774                         DRM_ERROR("Write domain conflict: "
1775                                   "obj %p target %d offset %d "
1776                                   "new %08x old %08x\n",
1777                                   obj, reloc.target_handle,
1778                                   (int) reloc.offset,
1779                                   reloc.write_domain,
1780                                   target_obj->pending_write_domain);
1781                         drm_gem_object_unreference(target_obj);
1782                         i915_gem_object_unpin(obj);
1783                         return -EINVAL;
1784                 }
1785
1786 #if WATCH_RELOC
1787                 DRM_INFO("%s: obj %p offset %08x target %d "
1788                          "read %08x write %08x gtt %08x "
1789                          "presumed %08x delta %08x\n",
1790                          __func__,
1791                          obj,
1792                          (int) reloc.offset,
1793                          (int) reloc.target_handle,
1794                          (int) reloc.read_domains,
1795                          (int) reloc.write_domain,
1796                          (int) target_obj_priv->gtt_offset,
1797                          (int) reloc.presumed_offset,
1798                          reloc.delta);
1799 #endif
1800
1801                 target_obj->pending_read_domains |= reloc.read_domains;
1802                 target_obj->pending_write_domain |= reloc.write_domain;
1803
1804                 /* If the relocation already has the right value in it, no
1805                  * more work needs to be done.
1806                  */
1807                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1808                         drm_gem_object_unreference(target_obj);
1809                         continue;
1810                 }
1811
1812                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1813                 if (ret != 0) {
1814                         drm_gem_object_unreference(target_obj);
1815                         i915_gem_object_unpin(obj);
1816                         return ret;
1817                 }
1818
1819                 /* Map the page containing the relocation we're going to
1820                  * perform.
1821                  */
1822                 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1823                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1824                                                       (reloc_offset &
1825                                                        ~(PAGE_SIZE - 1)));
1826                 reloc_entry = (uint32_t __iomem *)(reloc_page +
1827                                                    (reloc_offset & (PAGE_SIZE - 1)));
1828                 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1829
1830 #if WATCH_BUF
1831                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1832                           obj, (unsigned int) reloc.offset,
1833                           readl(reloc_entry), reloc_val);
1834 #endif
1835                 writel(reloc_val, reloc_entry);
1836                 io_mapping_unmap_atomic(reloc_page);
1837
1838                 /* Write the updated presumed offset for this entry back out
1839                  * to the user.
1840                  */
1841                 reloc.presumed_offset = target_obj_priv->gtt_offset;
1842                 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1843                 if (ret != 0) {
1844                         drm_gem_object_unreference(target_obj);
1845                         i915_gem_object_unpin(obj);
1846                         return -EFAULT;
1847                 }
1848
1849                 drm_gem_object_unreference(target_obj);
1850         }
1851
1852 #if WATCH_BUF
1853         if (0)
1854                 i915_gem_dump_object(obj, 128, __func__, ~0);
1855 #endif
1856         return 0;
1857 }
1858
1859 /** Dispatch a batchbuffer to the ring. */
1861 static int
1862 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1863                               struct drm_i915_gem_execbuffer *exec,
1864                               uint64_t exec_offset)
1865 {
1866         drm_i915_private_t *dev_priv = dev->dev_private;
1867         struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1868                                              (uintptr_t) exec->cliprects_ptr;
1869         int nbox = exec->num_cliprects;
1870         int i = 0, count;
1871         uint32_t        exec_start, exec_len;
1872         RING_LOCALS;
1873
1874         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1875         exec_len = (uint32_t) exec->batch_len;
1876
1877         if ((exec_start | exec_len) & 0x7) {
1878                 DRM_ERROR("batch start/length not 8-byte aligned\n");
1879                 return -EINVAL;
1880         }
1881
1882         if (!exec_start)
1883                 return -EINVAL;
1884
1885         count = nbox ? nbox : 1;
1886
1887         for (i = 0; i < count; i++) {
1888                 if (i < nbox) {
1889                         int ret = i915_emit_box(dev, boxes, i,
1890                                                 exec->DR1, exec->DR4);
1891                         if (ret)
1892                                 return ret;
1893                 }
1894
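                /* 830/845 take the bounded MI_BATCH_BUFFER command, which
                 * carries both the start and end addresses of the batch;
                 * everything newer chains to the batch with
                 * MI_BATCH_BUFFER_START instead, setting the 965-specific
                 * non-secure bit where applicable.
                 */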
1895                 if (IS_I830(dev) || IS_845G(dev)) {
1896                         BEGIN_LP_RING(4);
1897                         OUT_RING(MI_BATCH_BUFFER);
1898                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1899                         OUT_RING(exec_start + exec_len - 4);
1900                         OUT_RING(0);
1901                         ADVANCE_LP_RING();
1902                 } else {
1903                         BEGIN_LP_RING(2);
1904                         if (IS_I965G(dev)) {
1905                                 OUT_RING(MI_BATCH_BUFFER_START |
1906                                          (2 << 6) |
1907                                          MI_BATCH_NON_SECURE_I965);
1908                                 OUT_RING(exec_start);
1909                         } else {
1910                                 OUT_RING(MI_BATCH_BUFFER_START |
1911                                          (2 << 6));
1912                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1913                         }
1914                         ADVANCE_LP_RING();
1915                 }
1916         }
1917
1918         /* XXX breadcrumb */
1919         return 0;
1920 }
1921
1922 /* Throttle our rendering by waiting until the ring has completed our requests
1923  * emitted over 20 msec ago.
1924  *
1925  * This should get us reasonable parallelism between CPU and GPU but also
1926  * relatively low latency when blocking on a particular request to finish.
1927  */
1928 static int
1929 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1930 {
1931         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1932         int ret = 0;
1933         uint32_t seqno;
1934
1935         mutex_lock(&dev->struct_mutex);
1936         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1937         i915_file_priv->mm.last_gem_throttle_seqno =
1938                 i915_file_priv->mm.last_gem_seqno;
1939         if (seqno)
1940                 ret = i915_wait_request(dev, seqno);
1941         mutex_unlock(&dev->struct_mutex);
1942         return ret;
1943 }
1944
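/**
 * Main batchbuffer submission path.
 *
 * Copies the exec list in from userland, pins and relocates each object,
 * moves the objects to their target domains (accumulating the flushes and
 * invalidations that requires), dispatches the batchbuffer (the last entry
 * in the exec list) to the ring, and then tags every object with the new
 * request's seqno before copying the final offsets back out to userland.
 */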
1945 int
1946 i915_gem_execbuffer(struct drm_device *dev, void *data,
1947                     struct drm_file *file_priv)
1948 {
1949         drm_i915_private_t *dev_priv = dev->dev_private;
1950         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1951         struct drm_i915_gem_execbuffer *args = data;
1952         struct drm_i915_gem_exec_object *exec_list = NULL;
1953         struct drm_gem_object **object_list = NULL;
1954         struct drm_gem_object *batch_obj;
1955         int ret, i, pinned = 0;
1956         uint64_t exec_offset;
1957         uint32_t seqno, flush_domains;
1958
1959 #if WATCH_EXEC
1960         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1961                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1962 #endif
1963
1964         if (args->buffer_count < 1) {
1965                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1966                 return -EINVAL;
1967         }
1968         /* Copy in the exec list from userland */
1969         exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1970                                DRM_MEM_DRIVER);
1971         object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1972                                  DRM_MEM_DRIVER);
1973         if (exec_list == NULL || object_list == NULL) {
1974                 DRM_ERROR("Failed to allocate exec or object list "
1975                           "for %d buffers\n",
1976                           args->buffer_count);
1977                 ret = -ENOMEM;
1978                 goto pre_mutex_err;
1979         }
1980         ret = copy_from_user(exec_list,
1981                              (struct drm_i915_relocation_entry __user *)
1982                              (uintptr_t) args->buffers_ptr,
1983                              sizeof(*exec_list) * args->buffer_count);
1984         if (ret != 0) {
1985                 DRM_ERROR("copy %d exec entries failed %d\n",
1986                           args->buffer_count, ret);
                ret = -EFAULT;
1987                 goto pre_mutex_err;
1988         }
1989
1990         mutex_lock(&dev->struct_mutex);
1991
1992         i915_verify_inactive(dev, __FILE__, __LINE__);
1993
1994         if (dev_priv->mm.wedged) {
1995                 DRM_ERROR("Execbuf while wedged\n");
1996                 mutex_unlock(&dev->struct_mutex);
                ret = -EIO;
                goto pre_mutex_err;
1998         }
1999
2000         if (dev_priv->mm.suspended) {
2001                 DRM_ERROR("Execbuf while VT-switched.\n");
2002                 mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
2004         }
2005
2006         /* Zero the global flush/invalidate flags.  These
2007          * will be modified as each object is bound to the
2008          * GTT.
2009          */
2010         dev->invalidate_domains = 0;
2011         dev->flush_domains = 0;
2012
2013         /* Look up object handles and perform the relocations */
2014         for (i = 0; i < args->buffer_count; i++) {
2015                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
2016                                                        exec_list[i].handle);
2017                 if (object_list[i] == NULL) {
2018                         DRM_ERROR("Invalid object handle %d at index %d\n",
2019                                    exec_list[i].handle, i);
2020                         ret = -EBADF;
2021                         goto err;
2022                 }
2023
2024                 object_list[i]->pending_read_domains = 0;
2025                 object_list[i]->pending_write_domain = 0;
2026                 ret = i915_gem_object_pin_and_relocate(object_list[i],
2027                                                        file_priv,
2028                                                        &exec_list[i]);
2029                 if (ret) {
2030                         DRM_ERROR("object bind and relocate failed %d\n", ret);
2031                         goto err;
2032                 }
2033                 pinned = i + 1;
2034         }
2035
2036         /* Set the pending read domains for the batch buffer to COMMAND */
2037         batch_obj = object_list[args->buffer_count - 1];
2038         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
2039         batch_obj->pending_write_domain = 0;
2040
2041         i915_verify_inactive(dev, __FILE__, __LINE__);
2042
2043         for (i = 0; i < args->buffer_count; i++) {
2044                 struct drm_gem_object *obj = object_list[i];
2045
2046                 /* make sure all previous memory operations have passed */
2047                 ret = i915_gem_object_set_domain(obj,
2048                                                  obj->pending_read_domains,
2049                                                  obj->pending_write_domain);
2050                 if (ret) {
2051                         /* As we've partially updated domains on our buffers,
2052                          * we have to emit the flush we've accumulated
2053                          * before exiting, or we'll have broken the
2054                          * active/flushing/inactive invariants.
2055                          *
2056                          * We'll potentially have some things marked as
2057                          * being in write domains that they actually aren't,
2058                          * but that should be merely a minor performance loss.
2059                          */
2060                         flush_domains = i915_gem_dev_set_domain(dev);
2061                         (void)i915_add_request(dev, flush_domains);
2062                         goto err;
2063                 }
2064         }
2065
2066         i915_verify_inactive(dev, __FILE__, __LINE__);
2067
2068         /* Flush/invalidate caches and chipset buffer */
2069         flush_domains = i915_gem_dev_set_domain(dev);
2070
2071         i915_verify_inactive(dev, __FILE__, __LINE__);
2072
2073 #if WATCH_COHERENCY
2074         for (i = 0; i < args->buffer_count; i++) {
2075                 i915_gem_object_check_coherency(object_list[i],
2076                                                 exec_list[i].handle);
2077         }
2078 #endif
2079
2080         exec_offset = exec_list[args->buffer_count - 1].offset;
2081
2082 #if WATCH_EXEC
2083         i915_gem_dump_object(object_list[args->buffer_count - 1],
2084                               args->batch_len,
2085                               __func__,
2086                               ~0);
2087 #endif
2088
2089         (void)i915_add_request(dev, flush_domains);
2090
2091         /* Exec the batchbuffer */
2092         ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
2093         if (ret) {
2094                 DRM_ERROR("dispatch failed %d\n", ret);
2095                 goto err;
2096         }
2097
2098         /*
2099          * Ensure that the commands in the batch buffer are
2100          * finished before the interrupt fires
2101          */
2102         flush_domains = i915_retire_commands(dev);
2103
2104         i915_verify_inactive(dev, __FILE__, __LINE__);
2105
2106         /*
2107          * Get a seqno representing the execution of the current buffer,
2108          * which we can wait on.  We would like to mitigate these interrupts,
2109          * likely by only creating seqnos occasionally (so that we have
2110          * *some* interrupts representing completion of buffers that we can
2111          * wait on when trying to clear up gtt space).
2112          */
2113         seqno = i915_add_request(dev, flush_domains);
2114         BUG_ON(seqno == 0);
2115         i915_file_priv->mm.last_gem_seqno = seqno;
2116         for (i = 0; i < args->buffer_count; i++) {
2117                 struct drm_gem_object *obj = object_list[i];
2118
2119                 i915_gem_object_move_to_active(obj, seqno);
2120 #if WATCH_LRU
2121                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
2122 #endif
2123         }
2124 #if WATCH_LRU
2125         i915_dump_lru(dev, __func__);
2126 #endif
2127
2128         i915_verify_inactive(dev, __FILE__, __LINE__);
2129
2130         /* Copy the new buffer offsets back to the user's exec list. */
2131         ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2132                            (uintptr_t) args->buffers_ptr,
2133                            exec_list,
2134                            sizeof(*exec_list) * args->buffer_count);
2135         if (ret) {
2136                 DRM_ERROR("failed to copy %d exec entries "
2137                           "back to user (%d)\n",
2138                           args->buffer_count, ret);
                ret = -EFAULT;
        }
2139 err:
2140         if (object_list != NULL) {
2141                 for (i = 0; i < pinned; i++)
2142                         i915_gem_object_unpin(object_list[i]);
2143
2144                 for (i = 0; i < args->buffer_count; i++)
2145                         drm_gem_object_unreference(object_list[i]);
2146         }
2147         mutex_unlock(&dev->struct_mutex);
2148
2149 pre_mutex_err:
2150         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2151                  DRM_MEM_DRIVER);
2152         drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2153                  DRM_MEM_DRIVER);
2154
2155         return ret;
2156 }
2157
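/** Binds an object into the GTT, if necessary, and takes a pin reference.
 *
 * While pinned, an inactive object is kept off the inactive list so that
 * it can't be evicted out from under its user.
 */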
2158 int
2159 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2160 {
2161         struct drm_device *dev = obj->dev;
2162         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2163         int ret;
2164
2165         i915_verify_inactive(dev, __FILE__, __LINE__);
2166         if (obj_priv->gtt_space == NULL) {
2167                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2168                 if (ret != 0) {
2169                         DRM_ERROR("Failure to bind: %d\n", ret);
2170                         return ret;
2171                 }
2172         }
2173         obj_priv->pin_count++;
2174
2175         /* If the object is not active and not pending a flush,
2176          * remove it from the inactive list
2177          */
2178         if (obj_priv->pin_count == 1) {
2179                 atomic_inc(&dev->pin_count);
2180                 atomic_add(obj->size, &dev->pin_memory);
2181                 if (!obj_priv->active &&
2182                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2183                                            I915_GEM_DOMAIN_GTT)) == 0 &&
2184                     !list_empty(&obj_priv->list))
2185                         list_del_init(&obj_priv->list);
2186         }
2187         i915_verify_inactive(dev, __FILE__, __LINE__);
2188
2189         return 0;
2190 }
2191
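/** Releases a pin reference.
 *
 * When the last pin goes away, an object that is neither active nor
 * pending a GPU flush is returned to the inactive list, making it
 * eligible for eviction again.
 */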
2192 void
2193 i915_gem_object_unpin(struct drm_gem_object *obj)
2194 {
2195         struct drm_device *dev = obj->dev;
2196         drm_i915_private_t *dev_priv = dev->dev_private;
2197         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2198
2199         i915_verify_inactive(dev, __FILE__, __LINE__);
2200         obj_priv->pin_count--;
2201         BUG_ON(obj_priv->pin_count < 0);
2202         BUG_ON(obj_priv->gtt_space == NULL);
2203
2204         /* If the object is no longer pinned, and is
2205          * neither active nor being flushed, then stick it on
2206          * the inactive list
2207          */
2208         if (obj_priv->pin_count == 0) {
2209                 if (!obj_priv->active &&
2210                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2211                                            I915_GEM_DOMAIN_GTT)) == 0)
2212                         list_move_tail(&obj_priv->list,
2213                                        &dev_priv->mm.inactive_list);
2214                 atomic_dec(&dev->pin_count);
2215                 atomic_sub(obj->size, &dev->pin_memory);
2216         }
2217         i915_verify_inactive(dev, __FILE__, __LINE__);
2218 }
2219
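/** Userland interface for pinning an object into the GTT.
 *
 * The object's CPU write domain is flushed on the caller's behalf (see the
 * XXX below) and the resulting GTT offset is reported back.
 */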
2220 int
2221 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2222                    struct drm_file *file_priv)
2223 {
2224         struct drm_i915_gem_pin *args = data;
2225         struct drm_gem_object *obj;
2226         struct drm_i915_gem_object *obj_priv;
2227         int ret;
2228
2229         mutex_lock(&dev->struct_mutex);
2230
2231         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2232         if (obj == NULL) {
2233                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2234                           args->handle);
2235                 mutex_unlock(&dev->struct_mutex);
2236                 return -EBADF;
2237         }
2238         obj_priv = obj->driver_private;
2239
2240         ret = i915_gem_object_pin(obj, args->alignment);
2241         if (ret != 0) {
2242                 drm_gem_object_unreference(obj);
2243                 mutex_unlock(&dev->struct_mutex);
2244                 return ret;
2245         }
2246
2247         /* XXX - flush the CPU caches for pinned objects
2248          * as the X server doesn't manage domains yet
2249          */
2250         i915_gem_object_flush_cpu_write_domain(obj);
2251         args->offset = obj_priv->gtt_offset;
2252         drm_gem_object_unreference(obj);
2253         mutex_unlock(&dev->struct_mutex);
2254
2255         return 0;
2256 }
2257
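/** Userland interface for releasing a pin taken by i915_gem_pin_ioctl(). */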
2258 int
2259 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2260                      struct drm_file *file_priv)
2261 {
2262         struct drm_i915_gem_pin *args = data;
2263         struct drm_gem_object *obj;
2264
2265         mutex_lock(&dev->struct_mutex);
2266
2267         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2268         if (obj == NULL) {
2269                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2270                           args->handle);
2271                 mutex_unlock(&dev->struct_mutex);
2272                 return -EBADF;
2273         }
2274
2275         i915_gem_object_unpin(obj);
2276
2277         drm_gem_object_unreference(obj);
2278         mutex_unlock(&dev->struct_mutex);
2279         return 0;
2280 }
2281
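/** Reports to userland whether an object is still in use by the GPU,
 * i.e. whether it is on the active list awaiting retirement.
 */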
2282 int
2283 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2284                     struct drm_file *file_priv)
2285 {
2286         struct drm_i915_gem_busy *args = data;
2287         struct drm_gem_object *obj;
2288         struct drm_i915_gem_object *obj_priv;
2289
2290         mutex_lock(&dev->struct_mutex);
2291         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2292         if (obj == NULL) {
2293                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2294                           args->handle);
2295                 mutex_unlock(&dev->struct_mutex);
2296                 return -EBADF;
2297         }
2298
2299         obj_priv = obj->driver_private;
2300         args->busy = obj_priv->active;
2301
2302         drm_gem_object_unreference(obj);
2303         mutex_unlock(&dev->struct_mutex);
2304         return 0;
2305 }
2306
2307 int
2308 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2309                         struct drm_file *file_priv)
2310 {
2311         return i915_gem_ring_throttle(dev, file_priv);
2312 }
2313
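/** Allocates the driver-private state backing a new GEM object, which
 * starts out in the CPU read and write domains.
 */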
2314 int i915_gem_init_object(struct drm_gem_object *obj)
2315 {
2316         struct drm_i915_gem_object *obj_priv;
2317
2318         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2319         if (obj_priv == NULL)
2320                 return -ENOMEM;
2321
2322         /*
2323          * We've just allocated pages from the kernel,
2324          * so they've just been written by the CPU with
2325          * zeros. They'll need to be clflushed before we
2326          * use them with the GPU.
2327          */
2328         obj->write_domain = I915_GEM_DOMAIN_CPU;
2329         obj->read_domains = I915_GEM_DOMAIN_CPU;
2330
2331         obj_priv->agp_type = AGP_USER_MEMORY;
2332
2333         obj->driver_private = obj_priv;
2334         obj_priv->obj = obj;
2335         INIT_LIST_HEAD(&obj_priv->list);
2336         return 0;
2337 }
2338
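/** Final teardown of an object: drop any leaked pins, unbind it from the
 * GTT, and free the driver-private state.
 */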
2339 void i915_gem_free_object(struct drm_gem_object *obj)
2340 {
2341         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2342
2343         while (obj_priv->pin_count > 0)
2344                 i915_gem_object_unpin(obj);
2345
2346         i915_gem_object_unbind(obj);
2347
2348         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2349         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2350 }
2351
2352 /** Unbinds all objects that are on the given buffer list. */
2353 static int
2354 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2355 {
2356         struct drm_gem_object *obj;
2357         struct drm_i915_gem_object *obj_priv;
2358         int ret;
2359
2360         while (!list_empty(head)) {
2361                 obj_priv = list_first_entry(head,
2362                                             struct drm_i915_gem_object,
2363                                             list);
2364                 obj = obj_priv->obj;
2365
2366                 if (obj_priv->pin_count != 0) {
2367                         DRM_ERROR("Pinned object in unbind list\n");
2369                         return -EINVAL;
2370                 }
2371
2372                 ret = i915_gem_object_unbind(obj);
2373                 if (ret != 0) {
2374                         DRM_ERROR("Error unbinding object: %d\n", ret);
2377                         return ret;
2378                 }
2379         }
2380
2381
2383 }
2384
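/** Quiesces the GPU in preparation for suspend or VT switch.
 *
 * Emits a flush of all GPU write domains, polls the breadcrumb until every
 * outstanding request has retired (flagging the hardware as wedged if it
 * gets stuck), moves any leftover buffers to the inactive list, evicts
 * them from the GTT, and finally tears down the ringbuffer.
 */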
2385 static int
2386 i915_gem_idle(struct drm_device *dev)
2387 {
2388         drm_i915_private_t *dev_priv = dev->dev_private;
2389         uint32_t seqno, cur_seqno, last_seqno;
2390         int stuck, ret;
2391
2392         mutex_lock(&dev->struct_mutex);
2393
2394         if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
2395                 mutex_unlock(&dev->struct_mutex);
2396                 return 0;
2397         }
2398
2399         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
2400          * We need to replace this with a semaphore, or something.
2401          */
2402         dev_priv->mm.suspended = 1;
2403
2404         /* Cancel the retire work handler, wait for it to finish if running
2405          */
2406         mutex_unlock(&dev->struct_mutex);
2407         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2408         mutex_lock(&dev->struct_mutex);
2409
2410         i915_kernel_lost_context(dev);
2411
2412         /* Flush the GPU along with all non-CPU write domains
2413          */
2414         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2415                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2416         seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2417                                         I915_GEM_DOMAIN_GTT));
2418
2419         if (seqno == 0) {
2420                 mutex_unlock(&dev->struct_mutex);
2421                 return -ENOMEM;
2422         }
2423
2424         dev_priv->mm.waiting_gem_seqno = seqno;
2425         last_seqno = 0;
2426         stuck = 0;
2427         for (;;) {
2428                 cur_seqno = i915_get_gem_seqno(dev);
2429                 if (i915_seqno_passed(cur_seqno, seqno))
2430                         break;
2431                 if (last_seqno == cur_seqno) {
2432                         if (stuck++ > 100) {
2433                                 DRM_ERROR("hardware wedged\n");
2434                                 dev_priv->mm.wedged = 1;
2435                                 DRM_WAKEUP(&dev_priv->irq_queue);
2436                                 break;
2437                         }
2438                 }
2439                 msleep(10);
2440                 last_seqno = cur_seqno;
2441         }
2442         dev_priv->mm.waiting_gem_seqno = 0;
2443
2444         i915_gem_retire_requests(dev);
2445
2446         if (!dev_priv->mm.wedged) {
2447                 /* Active and flushing should now be empty as we've
2448                  * waited for a sequence higher than any pending execbuffer
2449                  */
2450                 WARN_ON(!list_empty(&dev_priv->mm.active_list));
2451                 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
2452                 /* Request should now be empty as we've also waited
2453                  * for the last request in the list
2454                  */
2455                 WARN_ON(!list_empty(&dev_priv->mm.request_list));
2456         }
2457
2458         /* Empty the active and flushing lists to inactive.  If there's
2459          * anything left at this point, it means that we're wedged and
2460          * nothing good's going to happen by leaving them there.  So strip
2461          * the GPU domains and just stuff them onto inactive.
2462          */
2463         while (!list_empty(&dev_priv->mm.active_list)) {
2464                 struct drm_i915_gem_object *obj_priv;
2465
2466                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
2467                                             struct drm_i915_gem_object,
2468                                             list);
2469                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2470                 i915_gem_object_move_to_inactive(obj_priv->obj);
2471         }
2472
2473         while (!list_empty(&dev_priv->mm.flushing_list)) {
2474                 struct drm_i915_gem_object *obj_priv;
2475
2476                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2477                                             struct drm_i915_gem_object,
2478                                             list);
2479                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2480                 i915_gem_object_move_to_inactive(obj_priv->obj);
2481         }
2482
2484         /* Move all inactive buffers out of the GTT. */
2485         ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2486         WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
2487         if (ret) {
2488                 mutex_unlock(&dev->struct_mutex);
2489                 return ret;
2490         }
2491
2492         i915_gem_cleanup_ringbuffer(dev);
2493         mutex_unlock(&dev->struct_mutex);
2494
2495         return 0;
2496 }
2497
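/** Sets up the hardware status page for chips that read it out of the GTT.
 *
 * A page-sized GEM object is allocated, pinned and kmapped, and its GTT
 * offset is programmed into HWS_PGA.  Chips using a physical-address
 * status page were already set up at driver load.
 */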
2498 static int
2499 i915_gem_init_hws(struct drm_device *dev)
2500 {
2501         drm_i915_private_t *dev_priv = dev->dev_private;
2502         struct drm_gem_object *obj;
2503         struct drm_i915_gem_object *obj_priv;
2504         int ret;
2505
2506         /* If we need a physical address for the status page, it's already
2507          * initialized at driver load time.
2508          */
2509         if (!I915_NEED_GFX_HWS(dev))
2510                 return 0;
2511
2512         obj = drm_gem_object_alloc(dev, 4096);
2513         if (obj == NULL) {
2514                 DRM_ERROR("Failed to allocate status page\n");
2515                 return -ENOMEM;
2516         }
2517         obj_priv = obj->driver_private;
2518         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
2519
2520         ret = i915_gem_object_pin(obj, 4096);
2521         if (ret != 0) {
2522                 drm_gem_object_unreference(obj);
2523                 return ret;
2524         }
2525
2526         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2527
2528         dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
2529         if (dev_priv->hw_status_page == NULL) {
2530                 DRM_ERROR("Failed to map status page.\n");
2531                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2532                 drm_gem_object_unreference(obj);
2533                 return -EINVAL;
2534         }
2535         dev_priv->hws_obj = obj;
2536         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2537         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2538         I915_READ(HWS_PGA); /* posting read */
2539         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2540
2541         return 0;
2542 }
2543
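/** Allocates and pins a 128KB GEM object for the ring, maps it
 * write-combined for CPU access, and programs the PRB0 registers to start
 * the ring, working around G45's failure to reset the head pointer.
 */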
2544 static int
2545 i915_gem_init_ringbuffer(struct drm_device *dev)
2546 {
2547         drm_i915_private_t *dev_priv = dev->dev_private;
2548         struct drm_gem_object *obj;
2549         struct drm_i915_gem_object *obj_priv;
2550         int ret;
2551         u32 head;
2552
2553         ret = i915_gem_init_hws(dev);
2554         if (ret != 0)
2555                 return ret;
2556
2557         obj = drm_gem_object_alloc(dev, 128 * 1024);
2558         if (obj == NULL) {
2559                 DRM_ERROR("Failed to allocate ringbuffer\n");
2560                 return -ENOMEM;
2561         }
2562         obj_priv = obj->driver_private;
2563
2564         ret = i915_gem_object_pin(obj, 4096);
2565         if (ret != 0) {
2566                 drm_gem_object_unreference(obj);
2567                 return ret;
2568         }
2569
2570         /* Set up the kernel mapping for the ring. */
2571         dev_priv->ring.Size = obj->size;
2572         dev_priv->ring.tail_mask = obj->size - 1;
2573
2574         dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2575         dev_priv->ring.map.size = obj->size;
2576         dev_priv->ring.map.type = 0;
2577         dev_priv->ring.map.flags = 0;
2578         dev_priv->ring.map.mtrr = 0;
2579
2580         drm_core_ioremap_wc(&dev_priv->ring.map, dev);
2581         if (dev_priv->ring.map.handle == NULL) {
2582                 DRM_ERROR("Failed to map ringbuffer.\n");
2583                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2584                 drm_gem_object_unreference(obj);
2585                 return -EINVAL;
2586         }
2587         dev_priv->ring.ring_obj = obj;
2588         dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2589
2590         /* Stop the ring if it's running. */
2591         I915_WRITE(PRB0_CTL, 0);
2592         I915_WRITE(PRB0_TAIL, 0);
2593         I915_WRITE(PRB0_HEAD, 0);
2594
2595         /* Initialize the ring. */
2596         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2597         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2598
2599         /* G45 ring initialization fails to reset head to zero */
2600         if (head != 0) {
2601                 DRM_ERROR("Ring head not reset to zero "
2602                           "ctl %08x head %08x tail %08x start %08x\n",
2603                           I915_READ(PRB0_CTL),
2604                           I915_READ(PRB0_HEAD),
2605                           I915_READ(PRB0_TAIL),
2606                           I915_READ(PRB0_START));
2607                 I915_WRITE(PRB0_HEAD, 0);
2608
2609                 DRM_ERROR("Ring head forced to zero "
2610                           "ctl %08x head %08x tail %08x start %08x\n",
2611                           I915_READ(PRB0_CTL),
2612                           I915_READ(PRB0_HEAD),
2613                           I915_READ(PRB0_TAIL),
2614                           I915_READ(PRB0_START));
2615         }
2616
2617         I915_WRITE(PRB0_CTL,
2618                    ((obj->size - 4096) & RING_NR_PAGES) |
2619                    RING_NO_REPORT |
2620                    RING_VALID);
2621
2622         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2623
2624         /* If the head is still not zero, the ring is dead */
2625         if (head != 0) {
2626                 DRM_ERROR("Ring initialization failed "
2627                           "ctl %08x head %08x tail %08x start %08x\n",
2628                           I915_READ(PRB0_CTL),
2629                           I915_READ(PRB0_HEAD),
2630                           I915_READ(PRB0_TAIL),
2631                           I915_READ(PRB0_START));
2632                 return -EIO;
2633         }
2634
2635         /* Update our cache of the ring state */
2636         i915_kernel_lost_context(dev);
2637
2638         return 0;
2639 }
2640
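/** Undoes i915_gem_init_ringbuffer(): unmaps and releases the ring object
 * and, if a GTT-based status page was set up, releases that too and points
 * HWS_PGA back at a high, unused address.
 */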
2641 static void
2642 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2643 {
2644         drm_i915_private_t *dev_priv = dev->dev_private;
2645
2646         if (dev_priv->ring.ring_obj == NULL)
2647                 return;
2648
2649         drm_core_ioremapfree(&dev_priv->ring.map, dev);
2650
2651         i915_gem_object_unpin(dev_priv->ring.ring_obj);
2652         drm_gem_object_unreference(dev_priv->ring.ring_obj);
2653         dev_priv->ring.ring_obj = NULL;
2654         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2655
2656         if (dev_priv->hws_obj != NULL) {
2657                 struct drm_gem_object *obj = dev_priv->hws_obj;
2658                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2659
2660                 kunmap(obj_priv->page_list[0]);
2661                 i915_gem_object_unpin(obj);
2662                 drm_gem_object_unreference(obj);
2663                 dev_priv->hws_obj = NULL;
2664                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2665                 dev_priv->hw_status_page = NULL;
2666
2667                 /* Write high address into HWS_PGA when disabling. */
2668                 I915_WRITE(HWS_PGA, 0x1ffff000);
2669         }
2670 }
2671
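/** Called when userland (typically the X server) re-acquires the VT:
 * reinitializes the ringbuffer and the CPU mapping of the aperture, clears
 * the suspended flag, and reinstalls the interrupt handler.
 */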
2672 int
2673 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2674                        struct drm_file *file_priv)
2675 {
2676         drm_i915_private_t *dev_priv = dev->dev_private;
2677         int ret;
2678
2679         if (dev_priv->mm.wedged) {
2680                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2681                 dev_priv->mm.wedged = 0;
2682         }
2683
2684         ret = i915_gem_init_ringbuffer(dev);
2685         if (ret != 0)
2686                 return ret;
2687
2688         dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
2689                                                         dev->agp->agp_info.aper_size
2690                                                         * 1024 * 1024);
2691
2692         mutex_lock(&dev->struct_mutex);
2693         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2694         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2695         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2696         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2697         dev_priv->mm.suspended = 0;
2698         mutex_unlock(&dev->struct_mutex);
2699
2700         drm_irq_install(dev);
2701
2702         return 0;
2703 }
2704
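/** Called when userland gives up the VT: idles the GPU, uninstalls the
 * interrupt handler, and frees the aperture mapping.
 */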
2705 int
2706 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2707                        struct drm_file *file_priv)
2708 {
2709         drm_i915_private_t *dev_priv = dev->dev_private;
2710         int ret;
2711
2712         ret = i915_gem_idle(dev);
2713         drm_irq_uninstall(dev);
2714
2715         io_mapping_free(dev_priv->mm.gtt_mapping);
2716         return ret;
2717 }
2718
2719 void
2720 i915_gem_lastclose(struct drm_device *dev)
2721 {
2722         int ret;
2723
2724         ret = i915_gem_idle(dev);
2725         if (ret)
2726                 DRM_ERROR("failed to idle hardware: %d\n", ret);
2727 }
2728
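/** One-time per-device GEM setup at driver load: initializes the object
 * lists, the retirement work handler, the initial seqno, and tiling
 * swizzle detection.
 */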
2729 void
2730 i915_gem_load(struct drm_device *dev)
2731 {
2732         drm_i915_private_t *dev_priv = dev->dev_private;
2733
2734         INIT_LIST_HEAD(&dev_priv->mm.active_list);
2735         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2736         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2737         INIT_LIST_HEAD(&dev_priv->mm.request_list);
2738         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2739                           i915_gem_retire_work_handler);
2740         dev_priv->mm.next_gem_seqno = 1;
2741
2742         i915_gem_detect_bit_6_swizzle(dev);
2743 }