drm/i915: Don't wait interruptible for possible plane buffer flush
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4755ba4..0f4afa3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1200,26 +1200,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
+               if (ret)
+                       goto unlock;
+
                list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
                ret = i915_gem_object_set_to_gtt_domain(obj, write);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
+               if (ret)
+                       goto unlock;
        }
 
        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
+               if (ret)
+                       goto unlock;
        }
 
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1227,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
        mutex_unlock(&dev->struct_mutex);
 
        switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+               return VM_FAULT_NOPAGE;
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
-       case -EFAULT:
-       case -EINVAL:
-               return VM_FAULT_SIGBUS;
        default:
-               return VM_FAULT_NOPAGE;
+               return VM_FAULT_SIGBUS;
        }
 }
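
The two fault-handler hunks above funnel every failure through the single
unlock: label and then translate the errno from the bind/flush/fence/insert
steps into a fault result: 0 and -ERESTARTSYS let the fault simply retry,
allocation pressure reports OOM, and everything else raises SIGBUS. A
minimal, userspace-compilable sketch of that translation (the fault_result
enum only stands in for the kernel's VM_FAULT_* codes; it is not the
kernel's definition):

#include <errno.h>

enum fault_result { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

/* Same mapping as the switch at the end of i915_gem_fault(). */
static enum fault_result fault_result_from_errno(int ret)
{
        switch (ret) {
        case 0:
        case -ERESTARTSYS:      /* interrupted wait: retry the fault */
                return FAULT_NOPAGE;
        case -ENOMEM:
        case -EAGAIN:
                return FAULT_OOM;
        default:                /* -EFAULT, -EINVAL, ... */
                return FAULT_SIGBUS;
        }
}
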
 
@@ -1293,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
                DRM_ERROR("failed to add to map hash\n");
+               ret = -ENOMEM;
                goto out_free_mm;
        }
 
@@ -1314,7 +1310,7 @@ out_free_list:
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
  *
- * Preserve the reservation of the mmaping with the DRM core code, but
+ * Preserve the reservation of the mmapping with the DRM core code, but
  * relinquish ownership of the pages back to the system.
  *
  * It is vital that we remove the page mapping if we have mapped a tiled
@@ -1588,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  *
  * Returned sequence numbers are nonzero on success.
  */
-static uint32_t
+uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 uint32_t flush_domains)
 {
@@ -1622,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();
 
-       DRM_DEBUG("%d\n", seqno);
+       DRM_DEBUG_DRIVER("%d\n", seqno);
 
        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
@@ -1775,7 +1771,7 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!dev_priv->hw_status_page)
+       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
                return;
 
        seqno = i915_get_gem_seqno(dev);
@@ -1799,6 +1795,12 @@ i915_gem_retire_requests(struct drm_device *dev)
                } else
                        break;
        }
+
+       if (unlikely (dev_priv->trace_irq_seqno &&
+                     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+               i915_user_irq_put(dev);
+               dev_priv->trace_irq_seqno = 0;
+       }
 }
 
 void
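
Both the request-retire loop and the new trace_irq_seqno check above rely on
i915_seqno_passed() to decide whether a sequence number has been reached. A
standalone restatement of that comparison (the helper itself is defined
elsewhere in the driver, not in these hunks), assuming only that 32-bit
seqnos are compared by signed subtraction so the test keeps working when the
counter wraps:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* seq1 "has passed" seq2 when it is at or after it, even across a wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(5, 3));
        assert(!seqno_passed(3, 5));
        assert(seqno_passed(2, 0xfffffff0u));   /* still true after the wrap */
        return 0;
}
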
@@ -1819,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
@@ -1836,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
                return -EIO;
 
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-               if (IS_IGDNG(dev))
+               if (IS_IRONLAKE(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                        ier = I915_READ(IER);
@@ -1851,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
-               ret = wait_event_interruptible(dev_priv->irq_queue,
-                                              i915_seqno_passed(i915_get_gem_seqno(dev),
-                                                                seqno) ||
-                                              atomic_read(&dev_priv->mm.wedged));
+               if (interruptible)
+                       ret = wait_event_interruptible(dev_priv->irq_queue,
+                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                               atomic_read(&dev_priv->mm.wedged));
+               else
+                       wait_event(dev_priv->irq_queue,
+                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                               atomic_read(&dev_priv->mm.wedged));
+
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
 
@@ -1878,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
        return ret;
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+       return i915_do_wait_request(dev, seqno, 1);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
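
The three hunks above split the wait: i915_do_wait_request() takes an
interruptible flag and chooses between wait_event_interruptible() and
wait_event(), while the old i915_wait_request() becomes a thin wrapper that
keeps the interruptible behaviour. A caller-side sketch of when each flavour
applies (example_wait_rendering is hypothetical, not part of the patch):

/* Ordinary ioctl paths stay interruptible and must be prepared to see
 * -ERESTARTSYS when a signal arrives; modesetting paths (see
 * i915_gem_object_set_to_display_plane below) must not be restarted
 * halfway through, so they pass interruptible = 0. */
static int example_wait_rendering(struct drm_device *dev, uint32_t seqno,
                                  int in_modeset)
{
        return i915_do_wait_request(dev, seqno, in_modeset ? 0 : 1);
}
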
@@ -1946,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
 #endif
                BEGIN_LP_RING(2);
                OUT_RING(cmd);
-               OUT_RING(0); /* noop */
+               OUT_RING(MI_NOOP);
                ADVANCE_LP_RING();
        }
 }
@@ -2008,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        /* blow away mappings if mapped through GTT */
        i915_gem_release_mmap(obj);
 
-       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-               i915_gem_clear_fence_reg(obj);
-
        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
@@ -2026,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
        BUG_ON(obj_priv->active);
 
+       /* release the fence reg _after_ flushing */
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+
        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2568,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        bool retry_alloc = false;
        int ret;
 
-       if (dev_priv->mm.suspended)
-               return -EBUSY;
-
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to bind a purgeable object\n");
                return -EINVAL;
@@ -2759,6 +2770,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
                                            old_write_domain);
 }
 
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+       switch (obj->write_domain) {
+       case I915_GEM_DOMAIN_GTT:
+               i915_gem_object_flush_gtt_write_domain(obj);
+               break;
+       case I915_GEM_DOMAIN_CPU:
+               i915_gem_object_flush_cpu_write_domain(obj);
+               break;
+       default:
+               i915_gem_object_flush_gpu_write_domain(obj);
+               break;
+       }
+}
+
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -2810,6 +2837,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       uint32_t old_write_domain, old_read_domains;
+       int ret;
+
+       /* Not valid to be called on unbound objects. */
+       if (obj_priv->gtt_space == NULL)
+               return -EINVAL;
+
+       i915_gem_object_flush_gpu_write_domain(obj);
+
+       /* Wait on any GPU rendering and flushing to occur. */
+       if (obj_priv->active) {
+#if WATCH_BUF
+               DRM_INFO("%s: object %p wait for seqno %08x\n",
+                         __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+               if (ret != 0)
+                       return ret;
+       }
+
+       old_write_domain = obj->write_domain;
+       old_read_domains = obj->read_domains;
+
+       obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+       i915_gem_object_flush_cpu_write_domain(obj);
+
+       /* It should now be out of any other write domains, and we can update
+        * the domain values for our changes.
+        */
+       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
+       obj->write_domain = I915_GEM_DOMAIN_GTT;
+       obj_priv->dirty = 1;
+
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           old_write_domain);
+
+       return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
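
i915_gem_object_set_to_display_plane() above waits uninterruptibly for
outstanding rendering because its caller sits in the modesetting path (the
matching caller lives in intel_display.c, outside this file's diff), where
backing out halfway through a mode set on a signal is not an option. A
hedged sketch of the expected call sequence; example_prepare_fb and the
alignment value are illustrative, not taken from the patch:

static int example_prepare_fb(struct drm_device *dev,
                              struct drm_gem_object *fb_obj)
{
        int ret;

        mutex_lock(&dev->struct_mutex);

        /* Keep the framebuffer resident in the GTT for scanout... */
        ret = i915_gem_object_pin(fb_obj, PAGE_SIZE /* illustrative alignment */);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* ...then move it to the GTT domain, waiting uninterruptibly for
         * any rendering still in flight. */
        ret = i915_gem_object_set_to_display_plane(fb_obj);
        if (ret)
                i915_gem_object_unpin(fb_obj);

        mutex_unlock(&dev->struct_mutex);
        return ret;
}
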
@@ -3169,7 +3247,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                 struct drm_file *file_priv,
-                                struct drm_i915_gem_exec_object *entry,
+                                struct drm_i915_gem_exec_object2 *entry,
                                 struct drm_i915_gem_relocation_entry *relocs)
 {
        struct drm_device *dev = obj->dev;
@@ -3177,12 +3255,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int i, ret;
        void __iomem *reloc_page;
+       bool need_fence;
+
+       need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+                    obj_priv->tiling_mode != I915_TILING_NONE;
+
+       /* Check fence reg constraints and rebind if necessary */
+       if (need_fence && !i915_obj_fenceable(dev, obj))
+               i915_gem_object_unbind(obj);
 
        /* Choose the GTT offset for our buffer and put it there. */
        ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
        if (ret)
                return ret;
 
+       /*
+        * Pre-965 chips need a fence register set up in order to
+        * properly handle blits to/from tiled surfaces.
+        */
+       if (need_fence) {
+               ret = i915_gem_object_get_fence_reg(obj);
+               if (ret != 0) {
+                       if (ret != -EBUSY && ret != -ERESTARTSYS)
+                               DRM_ERROR("Failure to install fence: %d\n",
+                                         ret);
+                       i915_gem_object_unpin(obj);
+                       return ret;
+               }
+       }
+
        entry->offset = obj_priv->gtt_offset;
 
        /* Apply the relocations, using the GTT aperture to avoid cache
@@ -3344,7 +3445,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  */
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
-                             struct drm_i915_gem_execbuffer *exec,
+                             struct drm_i915_gem_execbuffer2 *exec,
                              struct drm_clip_rect *cliprects,
                              uint64_t exec_offset)
 {
@@ -3357,7 +3458,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;
 
-       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
 
        count = nbox ? nbox : 1;
 
@@ -3434,7 +3535,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
                              uint32_t buffer_count,
                              struct drm_i915_gem_relocation_entry **relocs)
 {
@@ -3449,8 +3550,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
        }
 
        *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-       if (*relocs == NULL)
+       if (*relocs == NULL) {
+               DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
                return -ENOMEM;
+       }
 
        for (i = 0; i < buffer_count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3474,7 +3577,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 }
 
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
                            uint32_t buffer_count,
                            struct drm_i915_gem_relocation_entry *relocs)
 {
@@ -3507,7 +3610,7 @@ err:
 }
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
                           uint64_t exec_offset)
 {
        uint32_t exec_start, exec_len;
@@ -3524,22 +3627,57 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
        return 0;
 }
 
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+                              struct drm_gem_object **object_list,
+                              int count)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       DEFINE_WAIT(wait);
+       int i, ret = 0;
+
+       for (;;) {
+               prepare_to_wait(&dev_priv->pending_flip_queue,
+                               &wait, TASK_INTERRUPTIBLE);
+               for (i = 0; i < count; i++) {
+                       obj_priv = object_list[i]->driver_private;
+                       if (atomic_read(&obj_priv->pending_flip) > 0)
+                               break;
+               }
+               if (i == count)
+                       break;
+
+               if (!signal_pending(current)) {
+                       mutex_unlock(&dev->struct_mutex);
+                       schedule();
+                       mutex_lock(&dev->struct_mutex);
+                       continue;
+               }
+               ret = -ERESTARTSYS;
+               break;
+       }
+       finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+       return ret;
+}
+
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-                   struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv,
+                      struct drm_i915_gem_execbuffer2 *args,
+                      struct drm_i915_gem_exec_object2 *exec_list)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_execbuffer *args = data;
-       struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_relocation_entry *relocs;
-       int ret, ret2, i, pinned = 0;
+       int ret = 0, ret2, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains, reloc_index;
-       int pin_tries;
+       int pin_tries, flips;
 
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3550,25 +3688,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
-       /* Copy in the exec list from userland */
-       exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
-       object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
-       if (exec_list == NULL || object_list == NULL) {
-               DRM_ERROR("Failed to allocate exec or object list "
-                         "for %d buffers\n",
+       object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
+       if (object_list == NULL) {
+               DRM_ERROR("Failed to allocate object list for %d buffers\n",
                          args->buffer_count);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }
-       ret = copy_from_user(exec_list,
-                            (struct drm_i915_relocation_entry __user *)
-                            (uintptr_t) args->buffers_ptr,
-                            sizeof(*exec_list) * args->buffer_count);
-       if (ret != 0) {
-               DRM_ERROR("copy %d exec entries failed %d\n",
-                         args->buffer_count, ret);
-               goto pre_mutex_err;
-       }
 
        if (args->num_cliprects != 0) {
                cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3597,20 +3723,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
        if (atomic_read(&dev_priv->mm.wedged)) {
-               DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EIO;
                goto pre_mutex_err;
        }
 
        if (dev_priv->mm.suspended) {
-               DRM_ERROR("Execbuf while VT-switched.\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }
 
        /* Look up object handles */
+       flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
                object_list[i] = drm_gem_object_lookup(dev, file_priv,
                                                       exec_list[i].handle);
@@ -3629,6 +3754,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                        goto err;
                }
                obj_priv->in_execbuffer = true;
+               flips += atomic_read(&obj_priv->pending_flip);
+       }
+
+       if (flips > 0) {
+               ret = i915_gem_wait_for_pending_flip(dev, object_list,
+                                                    args->buffer_count);
+               if (ret)
+                       goto err;
        }
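
The loop above counts the pending_flip references held by the objects in
this submission and, if any are outstanding, parks in
i915_gem_wait_for_pending_flip() until the display side lets go of them. A
hedged sketch of the other half of that contract (the real producer lives in
the page-flip code, not in this file): whoever queues a flip bumps the
per-object count, and flip completion drops it and wakes pending_flip_queue
so the execbuffer wait can re-check. The example_* helpers are illustrative
only:

static void example_queue_flip(struct drm_i915_gem_object *obj_priv)
{
        atomic_inc(&obj_priv->pending_flip);
}

static void example_complete_flip(struct drm_device *dev,
                                  struct drm_i915_gem_object *obj_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        atomic_dec(&obj_priv->pending_flip);
        wake_up(&dev_priv->pending_flip_queue);
}
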
 
        /* Pin and relocate */
@@ -3813,20 +3946,6 @@ err:
 
        mutex_unlock(&dev->struct_mutex);
 
-       if (!ret) {
-               /* Copy the new buffer offsets back to the user's exec list. */
-               ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-                                  (uintptr_t) args->buffers_ptr,
-                                  exec_list,
-                                  sizeof(*exec_list) * args->buffer_count);
-               if (ret) {
-                       ret = -EFAULT;
-                       DRM_ERROR("failed to copy %d exec entries "
-                                 "back to user (%d)\n",
-                                 args->buffer_count, ret);
-               }
-       }
-
        /* Copy the updated relocations out regardless of current error
         * state.  Failure to update the relocs would mean that the next
         * time userland calls execbuf, it would do so with presumed offset
@@ -3843,12 +3962,158 @@ err:
 
 pre_mutex_err:
        drm_free_large(object_list);
-       drm_free_large(exec_list);
        kfree(cliprects);
 
        return ret;
 }
 
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_i915_gem_execbuffer *args = data;
+       struct drm_i915_gem_execbuffer2 exec2;
+       struct drm_i915_gem_exec_object *exec_list = NULL;
+       struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+       int ret, i;
+
+#if WATCH_EXEC
+       DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+       if (args->buffer_count < 1) {
+               DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+               return -EINVAL;
+       }
+
+       /* Copy in the exec list from userland */
+       exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+       exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+       if (exec_list == NULL || exec2_list == NULL) {
+               DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+                         args->buffer_count);
+               drm_free_large(exec_list);
+               drm_free_large(exec2_list);
+               return -ENOMEM;
+       }
+       ret = copy_from_user(exec_list,
+                            (struct drm_i915_relocation_entry __user *)
+                            (uintptr_t) args->buffers_ptr,
+                            sizeof(*exec_list) * args->buffer_count);
+       if (ret != 0) {
+               DRM_ERROR("copy %d exec entries failed %d\n",
+                         args->buffer_count, ret);
+               drm_free_large(exec_list);
+               drm_free_large(exec2_list);
+               return -EFAULT;
+       }
+
+       for (i = 0; i < args->buffer_count; i++) {
+               exec2_list[i].handle = exec_list[i].handle;
+               exec2_list[i].relocation_count = exec_list[i].relocation_count;
+               exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+               exec2_list[i].alignment = exec_list[i].alignment;
+               exec2_list[i].offset = exec_list[i].offset;
+               if (!IS_I965G(dev))
+                       exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+               else
+                       exec2_list[i].flags = 0;
+       }
+
+       exec2.buffers_ptr = args->buffers_ptr;
+       exec2.buffer_count = args->buffer_count;
+       exec2.batch_start_offset = args->batch_start_offset;
+       exec2.batch_len = args->batch_len;
+       exec2.DR1 = args->DR1;
+       exec2.DR4 = args->DR4;
+       exec2.num_cliprects = args->num_cliprects;
+       exec2.cliprects_ptr = args->cliprects_ptr;
+       exec2.flags = 0;
+
+       ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+       if (!ret) {
+               /* Copy the new buffer offsets back to the user's exec list. */
+               for (i = 0; i < args->buffer_count; i++)
+                       exec_list[i].offset = exec2_list[i].offset;
+               /* ... and back out to userspace */
+               ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                                  (uintptr_t) args->buffers_ptr,
+                                  exec_list,
+                                  sizeof(*exec_list) * args->buffer_count);
+               if (ret) {
+                       ret = -EFAULT;
+                       DRM_ERROR("failed to copy %d exec entries "
+                                 "back to user (%d)\n",
+                                 args->buffer_count, ret);
+               }
+       } else {
+               DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+       }
+
+       drm_free_large(exec_list);
+       drm_free_large(exec2_list);
+       return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_i915_gem_execbuffer2 *args = data;
+       struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+       int ret;
+
+#if WATCH_EXEC
+       DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+       if (args->buffer_count < 1) {
+               DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+               return -EINVAL;
+       }
+
+       exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+       if (exec2_list == NULL) {
+               DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+                         args->buffer_count);
+               return -ENOMEM;
+       }
+       ret = copy_from_user(exec2_list,
+                            (struct drm_i915_relocation_entry __user *)
+                            (uintptr_t) args->buffers_ptr,
+                            sizeof(*exec2_list) * args->buffer_count);
+       if (ret != 0) {
+               DRM_ERROR("copy %d exec entries failed %d\n",
+                         args->buffer_count, ret);
+               drm_free_large(exec2_list);
+               return -EFAULT;
+       }
+
+       ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+       if (!ret) {
+               /* Copy the new buffer offsets back to the user's exec list. */
+               ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                                  (uintptr_t) args->buffers_ptr,
+                                  exec2_list,
+                                  sizeof(*exec2_list) * args->buffer_count);
+               if (ret) {
+                       ret = -EFAULT;
+                       DRM_ERROR("failed to copy %d exec entries "
+                                 "back to user (%d)\n",
+                                 args->buffer_count, ret);
+               }
+       }
+
+       drm_free_large(exec2_list);
+       return ret;
+}
+
 int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
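
With i915_gem_execbuffer2() above, userspace hands the kernel struct
drm_i915_gem_exec_object2 entries directly and states per object whether a
fence register is required, instead of the kernel assuming one for every
tiled buffer on pre-965 parts (the code removed from i915_gem_object_pin()
below). A hedged userspace sketch, assuming the i915_drm.h additions that
accompany this change (struct drm_i915_gem_execbuffer2, struct
drm_i915_gem_exec_object2, DRM_IOCTL_I915_GEM_EXECBUFFER2,
EXEC_OBJECT_NEEDS_FENCE); the fd and buffer handle are placeholders:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_batch(int drm_fd, uint32_t batch_handle, uint32_t batch_len)
{
        struct drm_i915_gem_exec_object2 obj;
        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&obj, 0, sizeof(obj));
        obj.handle = batch_handle;
        obj.flags = EXEC_OBJECT_NEEDS_FENCE;    /* honoured only if the object is tiled */

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (uintptr_t)&obj;  /* last entry is the batch buffer */
        execbuf.buffer_count = 1;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = batch_len;

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
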
@@ -3862,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                if (ret)
                        return ret;
        }
-       /*
-        * Pre-965 chips need a fence register set up in order to
-        * properly handle tiled surfaces.
-        */
-       if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj);
-               if (ret != 0) {
-                       if (ret != -EBUSY && ret != -ERESTARTSYS)
-                               DRM_ERROR("Failure to install fence: %d\n",
-                                         ret);
-                       return ret;
-               }
-       }
+
        obj_priv->pin_count++;
 
        /* If the object is not active and not pending a flush,
@@ -4355,7 +4608,7 @@ i915_gem_init_hws(struct drm_device *dev)
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        I915_READ(HWS_PGA); /* posting read */
-       DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+       DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
        return 0;
 }
@@ -4554,15 +4807,11 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       int ret;
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
-
-       return ret;
+       return i915_gem_idle(dev);
 }
 
 void
@@ -4617,8 +4866,8 @@ i915_gem_load(struct drm_device *dev)
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
        }
-
        i915_gem_detect_bit_6_swizzle(dev);
+       init_waitqueue_head(&dev_priv->pending_flip_queue);
 }
 
 /*
@@ -4641,7 +4890,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 
        phys_obj->id = id;
 
-       phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+       phys_obj->handle = drm_pci_alloc(dev, size, 0);
        if (!phys_obj->handle) {
                ret = -ENOMEM;
                goto kfree_obj;
@@ -4793,7 +5042,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-       DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+       DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
        ret = copy_from_user(obj_addr, user_data, args->size);
        if (ret)
                return -EFAULT;