tree-wide: fix assorted typos all over the place
[safe/jmp/linux-2.6] / drivers / gpu / drm / i915 / i915_gem.c
index 6b209db..a2a3fa5 100644 (file)
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
 #include <linux/swap.h>
 #include <linux/pci.h>
 
 #define I915_GEM_GPU_DOMAINS   (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                 uint32_t read_domains,
-                                 uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -47,18 +45,19 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                     uint64_t offset,
                                                     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
-static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
 {
@@ -118,7 +117,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
-       int handle, ret;
+       int ret;
+       u32 handle;
 
        args->size = roundup(args->size, PAGE_SIZE);
 
@@ -140,6 +140,345 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static inline int
+fast_shmem_read(struct page **pages,
+               loff_t page_base, int page_offset,
+               char __user *data,
+               int length)
+{
+       char __iomem *vaddr;
+       int unwritten;
+
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+       unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       kunmap_atomic(vaddr, KM_USER0);
+
+       if (unwritten)
+               return -EFAULT;
+
+       return 0;
+}
+
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+       drm_i915_private_t *dev_priv = obj->dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+               obj_priv->tiling_mode != I915_TILING_NONE;
+}
+
+static inline int
+slow_shmem_copy(struct page *dst_page,
+               int dst_offset,
+               struct page *src_page,
+               int src_offset,
+               int length)
+{
+       char *dst_vaddr, *src_vaddr;
+
+       dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+       if (dst_vaddr == NULL)
+               return -ENOMEM;
+
+       src_vaddr = kmap_atomic(src_page, KM_USER1);
+       if (src_vaddr == NULL) {
+               kunmap_atomic(dst_vaddr, KM_USER0);
+               return -ENOMEM;
+       }
+
+       memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+       kunmap_atomic(src_vaddr, KM_USER1);
+       kunmap_atomic(dst_vaddr, KM_USER0);
+
+       return 0;
+}
+
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+                     int gpu_offset,
+                     struct page *cpu_page,
+                     int cpu_offset,
+                     int length,
+                     int is_read)
+{
+       char *gpu_vaddr, *cpu_vaddr;
+
+       /* Use the unswizzled path if this page isn't affected. */
+       if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+               if (is_read)
+                       return slow_shmem_copy(cpu_page, cpu_offset,
+                                              gpu_page, gpu_offset, length);
+               else
+                       return slow_shmem_copy(gpu_page, gpu_offset,
+                                              cpu_page, cpu_offset, length);
+       }
+
+       gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+       if (gpu_vaddr == NULL)
+               return -ENOMEM;
+
+       cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+       if (cpu_vaddr == NULL) {
+               kunmap_atomic(gpu_vaddr, KM_USER0);
+               return -ENOMEM;
+       }
+
+       /* Copy the data, XORing A6 with A17 (1). The user already knows he's
+        * XORing with the other bits (A9 for Y, A9 and A10 for X)
+        */
+       while (length > 0) {
+               int cacheline_end = ALIGN(gpu_offset + 1, 64);
+               int this_length = min(cacheline_end - gpu_offset, length);
+               int swizzled_gpu_offset = gpu_offset ^ 64;
+
+               if (is_read) {
+                       memcpy(cpu_vaddr + cpu_offset,
+                              gpu_vaddr + swizzled_gpu_offset,
+                              this_length);
+               } else {
+                       memcpy(gpu_vaddr + swizzled_gpu_offset,
+                              cpu_vaddr + cpu_offset,
+                              this_length);
+               }
+               cpu_offset += this_length;
+               gpu_offset += this_length;
+               length -= this_length;
+       }
+
+       kunmap_atomic(cpu_vaddr, KM_USER1);
+       kunmap_atomic(gpu_vaddr, KM_USER0);
+
+       return 0;
+}
+
+/**
+ * This is the fast shmem pread path, which attempts to copy_from_user directly
+ * from the backing pages of the object to the user's address space.  On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                         struct drm_i915_gem_pread *args,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+
+               ret = fast_shmem_read(obj_priv->pages,
+                                     page_base, page_offset,
+                                     user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+       return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+       mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+       int ret;
+
+       ret = i915_gem_object_get_pages(obj);
+
+       /* If we've insufficient memory to map in the pages, attempt
+        * to make some space by throwing out some old buffers.
+        */
+       if (ret == -ENOMEM) {
+               struct drm_device *dev = obj->dev;
+               gfp_t gfp;
+
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
+                       return ret;
+
+               gfp = i915_gem_object_get_page_gfp_mask(obj);
+               i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+               ret = i915_gem_object_get_pages(obj);
+               i915_gem_object_set_page_gfp_mask (obj, gfp);
+       }
+
+       return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which allocates temporary storage
+ * in kernel space to copy_to_user into outside of the struct_mutex, so we
+ * can copy out of the object's backing pages while holding the struct mutex
+ * and not take page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                         struct drm_i915_gem_pread *args,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index,  data_page_offset;
+       int page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+       int do_bit17_swizzling;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, yet we want to hold it while
+        * dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 1, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
+       }
+
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages_or_evict(obj);
+       if (ret)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               if (do_bit17_swizzling) {
+                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                   shmem_page_offset,
+                                                   user_pages[data_page_index],
+                                                   data_page_offset,
+                                                   page_length,
+                                                   1);
+               } else {
+                       ret = slow_shmem_copy(user_pages[data_page_index],
+                                             data_page_offset,
+                                             obj_priv->pages[shmem_page_index],
+                                             shmem_page_offset,
+                                             page_length);
+               }
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
+       }
+
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++) {
+               SetPageDirty(user_pages[i]);
+               page_cache_release(user_pages[i]);
+       }
+       drm_free_large(user_pages);
+
+       return ret;
+}
+
 /**
  * Reads data from the object referenced by handle.
  *
@@ -152,8 +491,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       ssize_t read;
-       loff_t offset;
        int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -171,33 +508,18 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-
-       offset = args->offset;
-
-       read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-                       args->size, &offset);
-       if (read != args->size) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               if (read < 0)
-                       return read;
-               else
-                       return -EINVAL;
+       if (i915_gem_object_needs_bit17_swizzle(obj)) {
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+       } else {
+               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+               if (ret != 0)
+                       ret = i915_gem_shmem_pread_slow(dev, obj, args,
+                                                       file_priv);
        }
 
        drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
 
-       return 0;
+       return ret;
 }
 
 /* This is the fast write path which cannot handle
@@ -215,7 +537,7 @@ fast_user_write(struct io_mapping *mapping,
 
        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
-                                                     user_data, length, length);
+                                                     user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
@@ -227,29 +549,54 @@ fast_user_write(struct io_mapping *mapping,
  */
 
 static inline int
-slow_user_write(struct io_mapping *mapping,
-               loff_t page_base, int page_offset,
-               char __user *user_data,
-               int length)
+slow_kernel_write(struct io_mapping *mapping,
+                 loff_t gtt_base, int gtt_offset,
+                 struct page *user_page, int user_offset,
+                 int length)
+{
+       char *src_vaddr, *dst_vaddr;
+       unsigned long unwritten;
+
+       dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+       src_vaddr = kmap_atomic(user_page, KM_USER1);
+       unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+                                                     src_vaddr + user_offset,
+                                                     length);
+       kunmap_atomic(src_vaddr, KM_USER1);
+       io_mapping_unmap_atomic(dst_vaddr);
+       if (unwritten)
+               return -EFAULT;
+       return 0;
+}
+
+static inline int
+fast_shmem_write(struct page **pages,
+                loff_t page_base, int page_offset,
+                char __user *data,
+                int length)
 {
        char __iomem *vaddr;
        unsigned long unwritten;
 
-       vaddr = io_mapping_map_wc(mapping, page_base);
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
-               return -EFAULT;
-       unwritten = __copy_from_user(vaddr + page_offset,
-                                    user_data, length);
-       io_mapping_unmap(vaddr);
+               return -ENOMEM;
+       unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       kunmap_atomic(vaddr, KM_USER0);
+
        if (unwritten)
                return -EFAULT;
        return 0;
 }
 
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
 static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-                   struct drm_i915_gem_pwrite *args,
-                   struct drm_file *file_priv)
+i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                        struct drm_i915_gem_pwrite *args,
+                        struct drm_file *file_priv)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -276,7 +623,183 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                goto fail;
 
        obj_priv = obj->driver_private;
-       offset = obj_priv->gtt_offset + args->offset;
+       offset = obj_priv->gtt_offset + args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+
+               ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
+                                      page_offset, user_data, page_length);
+
+               /* If we get a fault while copying data, then (presumably) our
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
+                */
+               if (ret)
+                       goto fail;
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+fail:
+       i915_gem_object_unpin(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+ */
+static int
+i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                        struct drm_i915_gem_pwrite *args,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       ssize_t remain;
+       loff_t gtt_page_base, offset;
+       loff_t first_data_page, last_data_page, num_pages;
+       loff_t pinned_pages, i;
+       struct page **user_pages;
+       struct mm_struct *mm = current->mm;
+       int gtt_page_offset, data_page_offset, data_page_index, page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, and all of the pwrite implementations
+        * want to hold it while dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto out_unpin_pages;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_pin(obj, 0);
+       if (ret)
+               goto out_unlock;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+       if (ret)
+               goto out_unpin_object;
+
+       obj_priv = obj->driver_private;
+       offset = obj_priv->gtt_offset + args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * gtt_page_base = page offset within aperture
+                * gtt_page_offset = offset within page in aperture
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               gtt_page_base = offset & PAGE_MASK;
+               gtt_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((gtt_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - gtt_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+                                       gtt_page_base, gtt_page_offset,
+                                       user_pages[data_page_index],
+                                       data_page_offset,
+                                       page_length);
+
+               /* If we get a fault while copying data, then (presumably) our
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
+                */
+               if (ret)
+                       goto out_unpin_object;
+
+               remain -= page_length;
+               offset += page_length;
+               data_ptr += page_length;
+       }
+
+out_unpin_object:
+       i915_gem_object_unpin(obj);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+out_unpin_pages:
+       for (i = 0; i < pinned_pages; i++)
+               page_cache_release(user_pages[i]);
+       drm_free_large(user_pages);
+
+       return ret;
+}
+
+/**
+ * This is the fast shmem pwrite path, which attempts to directly
+ * copy_from_user into the kmapped pages backing the object.
+ */
+static int
+i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pwrite *args,
+                          struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
        obj_priv->dirty = 1;
 
        while (remain > 0) {
@@ -292,66 +815,141 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
-                                      page_offset, user_data, page_length);
-
-               /* If we get a fault while copying data, then (presumably) our
-                * source page isn't available. In this case, use the
-                * non-atomic function
-                */
-               if (ret) {
-                       ret = slow_user_write (dev_priv->mm.gtt_mapping,
-                                              page_base, page_offset,
-                                              user_data, page_length);
-                       if (ret)
-                               goto fail;
-               }
+               ret = fast_shmem_write(obj_priv->pages,
+                                      page_base, page_offset,
+                                      user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
 
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
 
-fail:
-       i915_gem_object_unpin(obj);
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
 
+/**
+ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This avoids taking mmap_sem for faulting on the user's address while the
+ * struct_mutex is held.
+ */
 static int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-                     struct drm_i915_gem_pwrite *args,
-                     struct drm_file *file_priv)
+i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pwrite *args,
+                          struct drm_file *file_priv)
 {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index,  data_page_offset;
+       int page_length;
        int ret;
-       loff_t offset;
-       ssize_t written;
+       uint64_t data_ptr = args->data_ptr;
+       int do_bit17_swizzling;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, and all of the pwrite implementations
+        * want to hold it while dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
+       }
+
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
        mutex_lock(&dev->struct_mutex);
 
+       ret = i915_gem_object_get_pages_or_evict(obj);
+       if (ret)
+               goto fail_unlock;
+
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       if (ret != 0)
+               goto fail_put_pages;
 
+       obj_priv = obj->driver_private;
        offset = args->offset;
+       obj_priv->dirty = 1;
 
-       written = vfs_write(obj->filp,
-                           (char __user *)(uintptr_t) args->data_ptr,
-                           args->size, &offset);
-       if (written != args->size) {
-               mutex_unlock(&dev->struct_mutex);
-               if (written < 0)
-                       return written;
-               else
-                       return -EINVAL;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               if (do_bit17_swizzling) {
+                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                   shmem_page_offset,
+                                                   user_pages[data_page_index],
+                                                   data_page_offset,
+                                                   page_length,
+                                                   0);
+               } else {
+                       ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                             shmem_page_offset,
+                                             user_pages[data_page_index],
+                                             data_page_offset,
+                                             page_length);
+               }
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
        }
 
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
        mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++)
+               page_cache_release(user_pages[i]);
+       drm_free_large(user_pages);
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -392,10 +990,21 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0)
-               ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
-       else
-               ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+                dev->gtt_total != 0) {
+               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+               if (ret == -EFAULT) {
+                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+                                                      file_priv);
+               }
+       } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+               ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
+       } else {
+               ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+               if (ret == -EFAULT) {
+                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+                                                        file_priv);
+               }
+       }
 
 #if WATCH_PWRITE
        if (ret)
@@ -415,8 +1024,10 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;
@@ -425,10 +1036,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return -ENODEV;
 
        /* Only handle setting domains to types used by the CPU. */
-       if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+       if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;
 
-       if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+       if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;
 
        /* Having something in the write domain implies it's in the read
@@ -440,15 +1051,27 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
+       obj_priv = obj->driver_private;
 
        mutex_lock(&dev->struct_mutex);
+
+       intel_mark_busy(dev, obj);
+
 #if WATCH_BUF
-       DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+       DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
 #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+               /* Update the LRU on the fence for the CPU access that's
+                * about to occur.
+                */
+               if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                       list_move_tail(&obj_priv->fence_list,
+                                      &dev_priv->mm.fence_list);
+               }
+
                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
@@ -487,7 +1110,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }
 
 #if WATCH_BUF
-       DRM_INFO("%s: sw_finish %d (%p %d)\n",
+       DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
 #endif
        obj_priv = obj->driver_private;
@@ -576,22 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
-               list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
+               if (ret)
+                       goto unlock;
+
+               list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+               ret = i915_gem_object_set_to_gtt_domain(obj, write);
+               if (ret)
+                       goto unlock;
        }
 
        /* Need a new fence register? */
-       if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj, write);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
+       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj);
+               if (ret)
+                       goto unlock;
        }
 
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -599,19 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
        mutex_unlock(&dev->struct_mutex);
 
        switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+               return VM_FAULT_NOPAGE;
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
-       case -EFAULT:
-       case -EBUSY:
-               DRM_ERROR("can't insert pfn??  fault or busy...\n");
-               return VM_FAULT_SIGBUS;
        default:
-               return VM_FAULT_NOPAGE;
+               return VM_FAULT_SIGBUS;
        }
 }
 
@@ -633,13 +1255,12 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_map_list *list;
-       struct drm_map *map;
+       struct drm_local_map *map;
        int ret = 0;
 
        /* Set the object up for mmap'ing */
        list = &obj->map_list;
-       list->map = drm_calloc(1, sizeof(struct drm_map_list),
-                              DRM_MEM_DRIVER);
+       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;
 
@@ -679,12 +1300,61 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 out_free_mm:
        drm_mm_put_block(list->file_offset_node);
 out_free_list:
-       drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+       kfree(list->map);
 
        return ret;
 }
 
 /**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, than pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (dev->dev_mapping)
+               unmap_mapping_range(dev->dev_mapping,
+                                   obj_priv->mmap_offset, obj->size, 1);
+}
+
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_map_list *list;
+
+       list = &obj->map_list;
+       drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+       if (list->file_offset_node) {
+               drm_mm_put_block(list->file_offset_node);
+               list->file_offset_node = NULL;
+       }
+
+       if (list->map) {
+               kfree(list->map);
+               list->map = NULL;
+       }
+
+       obj_priv->mmap_offset = 0;
+}
+
+/**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
  *
@@ -756,36 +1426,37 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        obj_priv = obj->driver_private;
 
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+
        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
-               if (ret)
+               if (ret) {
+                       drm_gem_object_unreference(obj);
+                       mutex_unlock(&dev->struct_mutex);
                        return ret;
+               }
        }
 
        args->offset = obj_priv->mmap_offset;
 
-       obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
-       /* Make sure the alignment is correct for fence regs etc */
-       if (obj_priv->agp_mem &&
-           (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
-               list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        }
 
        drm_gem_object_unreference(obj);
@@ -794,30 +1465,41 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+void
+i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;
 
-       if (obj_priv->page_list == NULL)
+       BUG_ON(obj_priv->pages_refcount == 0);
+       BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
+
+       if (--obj_priv->pages_refcount != 0)
                return;
 
+       if (obj_priv->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_save_bit_17_swizzle(obj);
 
-       for (i = 0; i < page_count; i++)
-               if (obj_priv->page_list[i] != NULL) {
-                       if (obj_priv->dirty)
-                               set_page_dirty(obj_priv->page_list[i]);
-                       mark_page_accessed(obj_priv->page_list[i]);
-                       page_cache_release(obj_priv->page_list[i]);
-               }
+       if (obj_priv->madv == I915_MADV_DONTNEED)
+               obj_priv->dirty = 0;
+
+       for (i = 0; i < page_count; i++) {
+               if (obj_priv->pages[i] == NULL)
+                       break;
+
+               if (obj_priv->dirty)
+                       set_page_dirty(obj_priv->pages[i]);
+
+               if (obj_priv->madv == I915_MADV_WILLNEED)
+                       mark_page_accessed(obj_priv->pages[i]);
+
+               page_cache_release(obj_priv->pages[i]);
+       }
        obj_priv->dirty = 0;
 
-       drm_free(obj_priv->page_list,
-                page_count * sizeof(struct page *),
-                DRM_MEM_DRIVER);
-       obj_priv->page_list = NULL;
+       drm_free_large(obj_priv->pages);
+       obj_priv->pages = NULL;
 }
 
 static void
@@ -833,8 +1515,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
+       spin_lock(&dev_priv->mm.active_list_lock);
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
+       spin_unlock(&dev_priv->mm.active_list_lock);
        obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -850,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
        obj_priv->last_rendering_seqno = 0;
 }
 
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct inode *inode;
+
+       inode = obj->filp->f_path.dentry->d_inode;
+       if (inode->i_op->truncate)
+               inode->i_op->truncate (inode);
+
+       obj_priv->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+       return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
@@ -880,15 +1584,20 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  * Returned sequence numbers are nonzero on success.
  */
 static uint32_t
-i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+                uint32_t flush_domains)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *i915_file_priv = NULL;
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
        RING_LOCALS;
 
-       request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+       if (file_priv != NULL)
+               i915_file_priv = file_priv->driver_priv;
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
                return 0;
 
@@ -914,6 +1623,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&dev_priv->mm.request_list);
        list_add_tail(&request->list, &dev_priv->mm.request_list);
+       if (i915_file_priv) {
+               list_add_tail(&request->client_list,
+                             &i915_file_priv->mm.request_list);
+       } else {
+               INIT_LIST_HEAD(&request->client_list);
+       }
 
        /* Associate any objects on the flushing list matching the write
         * domain we're flushing with our flush.
@@ -927,15 +1642,24 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
                        if ((obj->write_domain & flush_domains) ==
                            obj->write_domain) {
+                               uint32_t old_write_domain = obj->write_domain;
+
                                obj->write_domain = 0;
                                i915_gem_object_move_to_active(obj, seqno);
+
+                               trace_i915_gem_object_change_domain(obj,
+                                                                   obj->read_domains,
+                                                                   old_write_domain);
                        }
                }
 
        }
 
-       if (was_empty && !dev_priv->mm.suspended)
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+       if (!dev_priv->mm.suspended) {
+               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               if (was_empty)
+                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+       }
        return seqno;
 }
 
@@ -973,9 +1697,12 @@ i915_gem_retire_request(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
+       trace_i915_gem_request_retire(dev, request->seqno);
+
        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
+       spin_lock(&dev_priv->mm.active_list_lock);
        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;
@@ -990,7 +1717,7 @@ i915_gem_retire_request(struct drm_device *dev,
                 * this seqno.
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)
-                       return;
+                       goto out;
 
 #if WATCH_LRU
                DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -999,15 +1726,28 @@ i915_gem_retire_request(struct drm_device *dev,
 
                if (obj->write_domain != 0)
                        i915_gem_object_move_to_flushing(obj);
-               else
+               else {
+                       /* Take a reference on the object so it won't be
+                        * freed while the spinlock is held.  The list
+                        * protection for this spinlock is safe when breaking
+                        * the lock like this since the next thing we do
+                        * is just get the head of the list again.
+                        */
+                       drm_gem_object_reference(obj);
                        i915_gem_object_move_to_inactive(obj);
+                       spin_unlock(&dev_priv->mm.active_list_lock);
+                       drm_gem_object_unreference(obj);
+                       spin_lock(&dev_priv->mm.active_list_lock);
+               }
        }
+out:
+       spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
 /**
  * Returns true if seq1 is later than seq2.
  */
-static int
+bool
 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 {
        return (int32_t)(seq1 - seq2) >= 0;
@@ -1030,6 +1770,9 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
+       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+               return;
+
        seqno = i915_get_gem_seqno(dev);
 
        while (!list_empty(&dev_priv->mm.request_list)) {
@@ -1042,14 +1785,21 @@ i915_gem_retire_requests(struct drm_device *dev)
                retiring_seqno = request->seqno;
 
                if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   dev_priv->mm.wedged) {
+                   atomic_read(&dev_priv->mm.wedged)) {
                        i915_gem_retire_request(dev, request);
 
                        list_del(&request->list);
-                       drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+                       list_del(&request->client_list);
+                       kfree(request);
                } else
                        break;
        }
+
+       if (unlikely (dev_priv->trace_irq_seqno &&
+                     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+               i915_user_irq_put(dev);
+               dev_priv->trace_irq_seqno = 0;
+       }
 }
 
 void
@@ -1066,7 +1816,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -1078,21 +1828,40 @@ static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 ier;
        int ret = 0;
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EIO;
+
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+               if (IS_IGDNG(dev))
+                       ier = I915_READ(DEIER) | I915_READ(GTIER);
+               else
+                       ier = I915_READ(IER);
+               if (!ier) {
+                       DRM_ERROR("something (likely vbetool) disabled "
+                                 "interrupts, re-enabling\n");
+                       i915_driver_irq_preinstall(dev);
+                       i915_driver_irq_postinstall(dev);
+               }
+
+               trace_i915_gem_request_wait_begin(dev, seqno);
+
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
-                                              dev_priv->mm.wedged);
+                                              atomic_read(&dev_priv->mm.wedged));
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
+
+               trace_i915_gem_request_wait_end(dev, seqno);
        }
-       if (dev_priv->mm.wedged)
+       if (atomic_read(&dev_priv->mm.wedged))
                ret = -EIO;
 
        if (ret && ret != -ERESTARTSYS)
@@ -1123,12 +1892,13 @@ i915_gem_flush(struct drm_device *dev,
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
 #endif
+       trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+                                    invalidate_domains, flush_domains);
 
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
 
-       if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
-                                                    I915_GEM_DOMAIN_GTT)) {
+       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
                 *
@@ -1222,7 +1992,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-       loff_t offset;
        int ret = 0;
 
 #if WATCH_BUF
@@ -1237,6 +2006,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                return -EINVAL;
        }
 
+       /* blow away mappings if mapped through GTT */
+       i915_gem_release_mmap(obj);
+
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+
        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
@@ -1250,23 +2025,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                return ret;
        }
 
+       BUG_ON(obj_priv->active);
+
        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }
 
-       BUG_ON(obj_priv->active);
-
-       /* blow away mappings if mapped through GTT */
-       offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
-       if (dev->dev_mapping)
-               unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
-
-       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-               i915_gem_clear_fence_reg(obj);
-
-       i915_gem_object_free_page_list(obj);
+       i915_gem_object_put_pages(obj);
+       BUG_ON(obj_priv->pages_refcount);
 
        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
@@ -1280,40 +2048,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);
 
+       if (i915_gem_object_is_purgeable(obj_priv))
+               i915_gem_object_truncate(obj);
+
+       trace_i915_gem_object_unbind(obj);
+
+       return 0;
+}
+
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *best = NULL;
+       struct drm_gem_object *first = NULL;
+
+       /* Try to find the smallest clean object */
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->size >= min_size) {
+                       if ((!obj_priv->dirty ||
+                            i915_gem_object_is_purgeable(obj_priv)) &&
+                           (!best || obj->size < best->size)) {
+                               best = obj;
+                               if (best->size == min_size)
+                                       return best;
+                       }
+                       if (!first)
+                           first = obj;
+               }
+       }
+
+       return best ? best : first;
+}
+
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t seqno;
+       int ret;
+       bool lists_empty;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+                      list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->mm.active_list));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
+       if (lists_empty)
+               return -ENOSPC;
+
+       /* Flush everything (on to the inactive lists) and evict */
+       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+       if (seqno == 0)
+               return -ENOMEM;
+
+       ret = i915_wait_request(dev, seqno);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_evict_from_inactive_list(dev);
+       if (ret)
+               return ret;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+                      list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->mm.active_list));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+       BUG_ON(!lists_empty);
+
        return 0;
 }
 
 static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_something(struct drm_device *dev, int min_size)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret = 0;
+       int ret;
 
        for (;;) {
+               i915_gem_retire_requests(dev);
+
                /* If there's an inactive buffer available now, grab it
                 * and be done.
                 */
-               if (!list_empty(&dev_priv->mm.inactive_list)) {
-                       obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
-                                                   struct drm_i915_gem_object,
-                                                   list);
-                       obj = obj_priv->obj;
-                       BUG_ON(obj_priv->pin_count != 0);
+               obj = i915_gem_find_inactive_object(dev, min_size);
+               if (obj) {
+                       struct drm_i915_gem_object *obj_priv;
+
 #if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
+                       obj_priv = obj->driver_private;
+                       BUG_ON(obj_priv->pin_count != 0);
                        BUG_ON(obj_priv->active);
 
                        /* Wait on the rendering and unbind the buffer. */
-                       ret = i915_gem_object_unbind(obj);
-                       break;
+                       return i915_gem_object_unbind(obj);
                }
 
                /* If we didn't get anything, but the ring is still processing
-                * things, wait for one of those things to finish and hopefully
-                * leave us a buffer to evict.
+                * things, wait for the next to finish and hopefully leave us
+                * a buffer to evict.
                 */
                if (!list_empty(&dev_priv->mm.request_list)) {
                        struct drm_i915_gem_request *request;
@@ -1324,16 +2165,9 @@ i915_gem_evict_something(struct drm_device *dev)
 
                        ret = i915_wait_request(dev, request->seqno);
                        if (ret)
-                               break;
+                               return ret;
 
-                       /* if waiting caused an object to become inactive,
-                        * then loop around and wait for it. Otherwise, we
-                        * assume that waiting freed and unbound something,
-                        * so there should now be some space in the GTT
-                        */
-                       if (!list_empty(&dev_priv->mm.inactive_list))
-                               continue;
-                       break;
+                       continue;
                }
 
                /* If we didn't have anything on the request list but there
@@ -1342,50 +2176,48 @@ i915_gem_evict_something(struct drm_device *dev)
                 * will get moved to inactive.
                 */
                if (!list_empty(&dev_priv->mm.flushing_list)) {
-                       obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-                                                   struct drm_i915_gem_object,
-                                                   list);
-                       obj = obj_priv->obj;
+                       struct drm_i915_gem_object *obj_priv;
+
+                       /* Find an object that we can immediately reuse */
+                       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+                               obj = obj_priv->obj;
+                               if (obj->size >= min_size)
+                                       break;
+
+                               obj = NULL;
+                       }
 
-                       i915_gem_flush(dev,
-                                      obj->write_domain,
-                                      obj->write_domain);
-                       i915_add_request(dev, obj->write_domain);
+                       if (obj != NULL) {
+                               uint32_t seqno;
 
-                       obj = NULL;
-                       continue;
-               }
+                               i915_gem_flush(dev,
+                                              obj->write_domain,
+                                              obj->write_domain);
+                               seqno = i915_add_request(dev, NULL, obj->write_domain);
+                               if (seqno == 0)
+                                       return -ENOMEM;
 
-               DRM_ERROR("inactive empty %d request empty %d "
-                         "flushing empty %d\n",
-                         list_empty(&dev_priv->mm.inactive_list),
-                         list_empty(&dev_priv->mm.request_list),
-                         list_empty(&dev_priv->mm.flushing_list));
-               /* If we didn't do any of the above, there's nothing to be done
-                * and we just can't fit it in.
-                */
-               return -ENOMEM;
-       }
-       return ret;
-}
+                               ret = i915_wait_request(dev, seqno);
+                               if (ret)
+                                       return ret;
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-       int ret;
+                               continue;
+                       }
+               }
 
-       for (;;) {
-               ret = i915_gem_evict_something(dev);
-               if (ret != 0)
-                       break;
+               /* If we didn't do any of the above, there's no single buffer
+                * large enough to swap out for the new one, so just evict
+                * everything and start again. (This should be rare.)
+                */
+               if (!list_empty (&dev_priv->mm.inactive_list))
+                       return i915_gem_evict_from_inactive_list(dev);
+               else
+                       return i915_gem_evict_everything(dev);
        }
-       if (ret == -ENOMEM)
-               return 0;
-       return ret;
 }
 
-static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+int
+i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
@@ -1394,18 +2226,17 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
        struct page *page;
        int ret;
 
-       if (obj_priv->page_list)
+       if (obj_priv->pages_refcount++ != 0)
                return 0;
 
        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
-       BUG_ON(obj_priv->page_list != NULL);
-       obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
-                                        DRM_MEM_DRIVER);
-       if (obj_priv->page_list == NULL) {
-               DRM_ERROR("Faled to allocate page list\n");
+       BUG_ON(obj_priv->pages != NULL);
+       obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
+       if (obj_priv->pages == NULL) {
+               obj_priv->pages_refcount--;
                return -ENOMEM;
        }
 
@@ -1415,12 +2246,15 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
                page = read_mapping_page(mapping, i, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
-                       DRM_ERROR("read_mapping_page failed: %d\n", ret);
-                       i915_gem_object_free_page_list(obj);
+                       i915_gem_object_put_pages(obj);
                        return ret;
                }
-               obj_priv->page_list[i] = page;
+               obj_priv->pages[i] = page;
        }
+
+       if (obj_priv->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_do_bit_17_swizzle(obj);
+
        return 0;
 }
 
@@ -1452,7 +2286,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int regnum = obj_priv->fence_reg;
        int tile_width;
-       uint32_t val;
+       uint32_t fence_reg, val;
        uint32_t pitch_val;
 
        if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
@@ -1479,7 +2313,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;
 
-       I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+       if (regnum < 8)
+               fence_reg = FENCE_REG_830_0 + (regnum * 4);
+       else
+               fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
+       I915_WRITE(fence_reg, val);
 }
 
 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -1491,31 +2329,34 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        int regnum = obj_priv->fence_reg;
        uint32_t val;
        uint32_t pitch_val;
+       uint32_t fence_size_bits;
 
-       if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+       if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
            (obj_priv->gtt_offset & (obj->size - 1))) {
-               WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+               WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
                     __func__, obj_priv->gtt_offset);
                return;
        }
 
-       pitch_val = (obj_priv->stride / 128) - 1;
+       pitch_val = obj_priv->stride / 128;
+       pitch_val = ffs(pitch_val) - 1;
+       WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
 
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-       val |= I830_FENCE_SIZE_BITS(obj->size);
+       fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
+       WARN_ON(fence_size_bits & ~0x00000f00);
+       val |= fence_size_bits;
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;
 
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
-
 }
 
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
- * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -1526,14 +2367,21 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
+int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_i915_fence_reg *reg = NULL;
-       int i, ret;
+       struct drm_i915_gem_object *old_obj_priv = NULL;
+       int i, ret, avail;
+
+       /* Just update our place in the LRU if our fence is getting used. */
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               return 0;
+       }
 
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
@@ -1556,53 +2404,75 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
        }
 
        /* First try to find a free reg */
+       avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
                if (!reg->obj)
                        break;
+
+               old_obj_priv = reg->obj->driver_private;
+               if (!old_obj_priv->pin_count)
+                   avail++;
        }
 
        /* None available, try to steal one or wait for a user to finish */
        if (i == dev_priv->num_fence_regs) {
-               struct drm_i915_gem_object *old_obj_priv = NULL;
-               loff_t offset;
-
-try_again:
-               /* Could try to use LRU here instead... */
-               for (i = dev_priv->fence_reg_start;
-                    i < dev_priv->num_fence_regs; i++) {
-                       reg = &dev_priv->fence_regs[i];
-                       old_obj_priv = reg->obj->driver_private;
-                       if (!old_obj_priv->pin_count)
+               struct drm_gem_object *old_obj = NULL;
+
+               if (avail == 0)
+                       return -ENOSPC;
+
+               list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+                                   fence_list) {
+                       old_obj = old_obj_priv->obj;
+
+                       if (old_obj_priv->pin_count)
+                               continue;
+
+                       /* Take a reference, as otherwise the wait_rendering
+                        * below may cause the object to get freed out from
+                        * under us.
+                        */
+                       drm_gem_object_reference(old_obj);
+
+                       /* i915 uses fences for GPU access to tiled buffers */
+                       if (IS_I965G(dev) || !old_obj_priv->active)
                                break;
-               }
 
-               /*
-                * Now things get ugly... we have to wait for one of the
-                * objects to finish before trying again.
-                */
-               if (i == dev_priv->num_fence_regs) {
-                       ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
-                       if (ret) {
-                               WARN(ret != -ERESTARTSYS,
-                                    "switch to GTT domain failed: %d\n", ret);
+                       /* This brings the object to the head of the LRU if it
+                        * had been written to.  The only way this should
+                        * result in us waiting longer than the expected
+                        * optimal amount of time is if there was a
+                        * fence-using buffer later that was read-only.
+                        */
+                       i915_gem_object_flush_gpu_write_domain(old_obj);
+                       ret = i915_gem_object_wait_rendering(old_obj);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(old_obj);
                                return ret;
                        }
-                       goto try_again;
+
+                       break;
                }
 
                /*
                 * Zap this virtual mapping so we can set up a fence again
                 * for this object next time we need it.
                 */
-               offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
-               if (dev->dev_mapping)
-                       unmap_mapping_range(dev->dev_mapping, offset,
-                                           reg->obj->size, 1);
+               i915_gem_release_mmap(old_obj);
+
+               i = old_obj_priv->fence_reg;
+               reg = &dev_priv->fence_regs[i];
+
                old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+               list_del_init(&old_obj_priv->fence_list);
+
+               drm_gem_object_unreference(old_obj);
        }
 
        obj_priv->fence_reg = i;
+       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
        reg->obj = obj;
 
        if (IS_I965G(dev))
@@ -1612,6 +2482,8 @@ try_again:
        else
                i830_write_fence_reg(reg);
 
+       trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
        return 0;
 }
 
@@ -1631,11 +2503,57 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
        if (IS_I965G(dev))
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-       else
-               I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+       else {
+               uint32_t fence_reg;
+
+               if (obj_priv->fence_reg < 8)
+                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+               else
+                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
+                                                      8) * 4;
+
+               I915_WRITE(fence_reg, 0);
+       }
 
        dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       list_del_init(&obj_priv->fence_list);
+}
+
+/**
+ * i915_gem_object_put_fence_reg - waits on outstanding fenced access
+ * to the buffer to finish, and then resets the fence register.
+ * @obj: tiled object holding a fence register.
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+int
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+               return 0;
+
+       /* On the i915, GPU access to tiled buffers is via a fence,
+        * therefore we must wait for any outstanding access to complete
+        * before clearing the fence.
+        */
+       if (!IS_I965G(dev)) {
+               int ret;
+
+               i915_gem_object_flush_gpu_write_domain(obj);
+               i915_gem_object_flush_gtt_write_domain(obj);
+               ret = i915_gem_object_wait_rendering(obj);
+               if (ret != 0)
+                       return ret;
+       }
+
+       i915_gem_clear_fence_reg (obj);
+
+       return 0;
 }
 
 /**
@@ -1648,13 +2566,20 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
-       int page_count, ret;
+       bool retry_alloc = false;
+       int ret;
 
        if (dev_priv->mm.suspended)
                return -EBUSY;
+
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to bind a purgeable object\n");
+               return -EINVAL;
+       }
+
        if (alignment == 0)
                alignment = i915_gem_get_gtt_alignment(obj);
-       if (alignment & (PAGE_SIZE - 1)) {
+       if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }
@@ -1677,47 +2602,67 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 #if WATCH_LRU
                DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-               if (list_empty(&dev_priv->mm.inactive_list) &&
-                   list_empty(&dev_priv->mm.flushing_list) &&
-                   list_empty(&dev_priv->mm.active_list)) {
-                       DRM_ERROR("GTT full, but LRU list empty\n");
-                       return -ENOMEM;
-               }
-
-               ret = i915_gem_evict_something(dev);
-               if (ret != 0) {
-                       if (ret != -ERESTARTSYS)
-                               DRM_ERROR("Failed to evict a buffer %d\n", ret);
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
                        return ret;
-               }
+
                goto search_free;
        }
 
 #if WATCH_BUF
-       DRM_INFO("Binding object of size %d at 0x%08x\n",
+       DRM_INFO("Binding object of size %zd at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
 #endif
-       ret = i915_gem_object_get_page_list(obj);
+       if (retry_alloc) {
+               i915_gem_object_set_page_gfp_mask (obj,
+                                                  i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
+       }
+       ret = i915_gem_object_get_pages(obj);
+       if (retry_alloc) {
+               i915_gem_object_set_page_gfp_mask (obj,
+                                                  i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
+       }
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
+
+               if (ret == -ENOMEM) {
+                       /* first try to clear up some space from the GTT */
+                       ret = i915_gem_evict_something(dev, obj->size);
+                       if (ret) {
+                               /* now try to shrink everyone else */
+                               if (! retry_alloc) {
+                                   retry_alloc = true;
+                                   goto search_free;
+                               }
+
+                               return ret;
+                       }
+
+                       goto search_free;
+               }
+
                return ret;
        }
 
-       page_count = obj->size / PAGE_SIZE;
        /* Create an AGP memory structure pointing at our pages, and bind it
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
-                                              obj_priv->page_list,
-                                              page_count,
+                                              obj_priv->pages,
+                                              obj->size >> PAGE_SHIFT,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
-               i915_gem_object_free_page_list(obj);
+               i915_gem_object_put_pages(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
-               return -ENOMEM;
+
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
+                       return ret;
+
+               goto search_free;
        }
        atomic_inc(&dev->gtt_count);
        atomic_add(obj->size, &dev->gtt_memory);
@@ -1726,8 +2671,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
-       BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-       BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+       BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+       BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+
+       trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
 
        return 0;
 }
@@ -1741,10 +2688,12 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
-       if (obj_priv->page_list == NULL)
+       if (obj_priv->pages == NULL)
                return;
 
-       drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+       trace_i915_gem_object_clflush(obj);
+
+       drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
@@ -1753,21 +2702,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        uint32_t seqno;
+       uint32_t old_write_domain;
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return;
 
        /* Queue the GPU write cache flushing we need. */
+       old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
-       seqno = i915_add_request(dev, obj->write_domain);
+       seqno = i915_add_request(dev, NULL, obj->write_domain);
        obj->write_domain = 0;
        i915_gem_object_move_to_active(obj, seqno);
+
+       trace_i915_gem_object_change_domain(obj,
+                                           obj->read_domains,
+                                           old_write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 {
+       uint32_t old_write_domain;
+
        if (obj->write_domain != I915_GEM_DOMAIN_GTT)
                return;
 
@@ -1775,7 +2732,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
         * to it immediately go to main memory as far as we know, so there's
         * no chipset flush.  It also doesn't land in render cache.
         */
+       old_write_domain = obj->write_domain;
        obj->write_domain = 0;
+
+       trace_i915_gem_object_change_domain(obj,
+                                           obj->read_domains,
+                                           old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
@@ -1783,13 +2745,19 @@ static void
 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       uint32_t old_write_domain;
 
        if (obj->write_domain != I915_GEM_DOMAIN_CPU)
                return;
 
        i915_gem_clflush_object(obj);
        drm_agp_chipset_flush(dev);
+       old_write_domain = obj->write_domain;
        obj->write_domain = 0;
+
+       trace_i915_gem_object_change_domain(obj,
+                                           obj->read_domains,
+                                           old_write_domain);
 }
 
 /**
@@ -1802,6 +2770,7 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       uint32_t old_write_domain, old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
@@ -1814,6 +2783,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        if (ret != 0)
                return ret;
 
+       old_write_domain = obj->write_domain;
+       old_read_domains = obj->read_domains;
+
        /* If we're writing through the GTT domain, then CPU and GPU caches
         * will need to be invalidated at next use.
         */
@@ -1832,6 +2804,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
                obj_priv->dirty = 1;
        }
 
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           old_write_domain);
+
        return 0;
 }
 
@@ -1844,7 +2820,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
-       struct drm_device *dev = obj->dev;
+       uint32_t old_write_domain, old_read_domains;
        int ret;
 
        i915_gem_object_flush_gpu_write_domain(obj);
@@ -1860,10 +2836,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+       old_write_domain = obj->write_domain;
+       old_read_domains = obj->read_domains;
+
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj);
-               drm_agp_chipset_flush(dev);
 
                obj->read_domains |= I915_GEM_DOMAIN_CPU;
        }
@@ -1881,6 +2859,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
 
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           old_write_domain);
+
        return 0;
 }
 
@@ -1996,30 +2978,31 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *             drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                 uint32_t read_domains,
-                                 uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
        struct drm_device               *dev = obj->dev;
        struct drm_i915_gem_object      *obj_priv = obj->driver_private;
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
+       uint32_t                        old_read_domains;
+
+       BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+       BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
-       BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-       BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+       intel_mark_busy(dev, obj);
 
 #if WATCH_BUF
        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 __func__, obj,
-                obj->read_domains, read_domains,
-                obj->write_domain, write_domain);
+                obj->read_domains, obj->pending_read_domains,
+                obj->write_domain, obj->pending_write_domain);
 #endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
-       if (write_domain == 0)
-               read_domains |= obj->read_domains;
+       if (obj->pending_write_domain == 0)
+               obj->pending_read_domains |= obj->read_domains;
        else
                obj_priv->dirty = 1;
 
@@ -2029,15 +3012,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         * any read domains which differ from the old
         * write domain
         */
-       if (obj->write_domain && obj->write_domain != read_domains) {
+       if (obj->write_domain &&
+           obj->write_domain != obj->pending_read_domains) {
                flush_domains |= obj->write_domain;
-               invalidate_domains |= read_domains & ~obj->write_domain;
+               invalidate_domains |=
+                       obj->pending_read_domains & ~obj->write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
-       invalidate_domains |= read_domains & ~obj->read_domains;
+       invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
                DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2046,9 +3031,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                i915_gem_clflush_object(obj);
        }
 
-       if ((write_domain | flush_domains) != 0)
-               obj->write_domain = write_domain;
-       obj->read_domains = read_domains;
+       old_read_domains = obj->read_domains;
+
+       /* The actual obj->write_domain will be updated with
+        * pending_write_domain after we emit the accumulated flush for all
+        * of our domain changes in execbuffers (which clears objects'
+        * write_domains).  So if we have a current write domain that we
+        * aren't changing, set pending_write_domain to that.
+        */
+       if (flush_domains == 0 && obj->pending_write_domain == 0)
+               obj->pending_write_domain = obj->write_domain;
+       obj->read_domains = obj->pending_read_domains;
 
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
@@ -2058,6 +3051,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                 obj->read_domains, obj->write_domain,
                 dev->invalidate_domains, dev->flush_domains);
 #endif
+
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           obj->write_domain);
 }
 
 /**
@@ -2069,7 +3066,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
        if (!obj_priv->page_cpu_valid)
@@ -2083,16 +3079,14 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
                for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
                        if (obj_priv->page_cpu_valid[i])
                                continue;
-                       drm_clflush_pages(obj_priv->page_list + i, 1);
+                       drm_clflush_pages(obj_priv->pages + i, 1);
                }
-               drm_agp_chipset_flush(dev);
        }
 
        /* Free the page_cpu_valid mappings which are now stale, whether
         * or not we've got I915_GEM_DOMAIN_CPU.
         */
-       drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-                DRM_MEM_DRIVER);
+       kfree(obj_priv->page_cpu_valid);
        obj_priv->page_cpu_valid = NULL;
 }
 
@@ -2113,6 +3107,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                          uint64_t offset, uint64_t size)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       uint32_t old_read_domains;
        int i, ret;
 
        if (offset == 0 && size == obj->size)
@@ -2134,8 +3129,8 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         * newly adding I915_GEM_DOMAIN_CPU
         */
        if (obj_priv->page_cpu_valid == NULL) {
-               obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
-                                                     DRM_MEM_DRIVER);
+               obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
+                                                  GFP_KERNEL);
                if (obj_priv->page_cpu_valid == NULL)
                        return -ENOMEM;
        } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
@@ -2149,7 +3144,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                if (obj_priv->page_cpu_valid[i])
                        continue;
 
-               drm_clflush_pages(obj_priv->page_list + i, 1);
+               drm_clflush_pages(obj_priv->pages + i, 1);
 
                obj_priv->page_cpu_valid[i] = 1;
        }
@@ -2159,8 +3154,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
+       old_read_domains = obj->read_domains;
        obj->read_domains |= I915_GEM_DOMAIN_CPU;
 
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           obj->write_domain);
+
        return 0;
 }
 
@@ -2170,12 +3170,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                 struct drm_file *file_priv,
-                                struct drm_i915_gem_exec_object *entry)
+                                struct drm_i915_gem_exec_object *entry,
+                                struct drm_i915_gem_relocation_entry *relocs)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_relocation_entry reloc;
-       struct drm_i915_gem_relocation_entry __user *relocs;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int i, ret;
        void __iomem *reloc_page;
@@ -2187,113 +3186,120 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 
        entry->offset = obj_priv->gtt_offset;
 
-       relocs = (struct drm_i915_gem_relocation_entry __user *)
-                (uintptr_t) entry->relocs_ptr;
        /* Apply the relocations, using the GTT aperture to avoid cache
         * flushing requirements.
         */
        for (i = 0; i < entry->relocation_count; i++) {
+               struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
                struct drm_gem_object *target_obj;
                struct drm_i915_gem_object *target_obj_priv;
                uint32_t reloc_val, reloc_offset;
                uint32_t __iomem *reloc_entry;
 
-               ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-               if (ret != 0) {
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
-
                target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-                                                  reloc.target_handle);
+                                                  reloc->target_handle);
                if (target_obj == NULL) {
                        i915_gem_object_unpin(obj);
                        return -EBADF;
                }
                target_obj_priv = target_obj->driver_private;
 
+#if WATCH_RELOC
+               DRM_INFO("%s: obj %p offset %08x target %d "
+                        "read %08x write %08x gtt %08x "
+                        "presumed %08x delta %08x\n",
+                        __func__,
+                        obj,
+                        (int) reloc->offset,
+                        (int) reloc->target_handle,
+                        (int) reloc->read_domains,
+                        (int) reloc->write_domain,
+                        (int) target_obj_priv->gtt_offset,
+                        (int) reloc->presumed_offset,
+                        reloc->delta);
+#endif
+
                /* The target buffer should have appeared before us in the
                 * exec_object list, so it should have a GTT space bound by now.
                 */
                if (target_obj_priv->gtt_space == NULL) {
                        DRM_ERROR("No GTT space found for object %d\n",
-                                 reloc.target_handle);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
-
-               if (reloc.offset > obj->size - 4) {
-                       DRM_ERROR("Relocation beyond object bounds: "
-                                 "obj %p target %d offset %d size %d.\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset, (int) obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
-               if (reloc.offset & 3) {
-                       DRM_ERROR("Relocation not 4-byte aligned: "
-                                 "obj %p target %d offset %d.\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset);
+                                 reloc->target_handle);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
 
-               if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-                   reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+               /* Validate that the target is in a valid r/w GPU domain */
+               if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+                   reloc->read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset,
-                                 reloc.read_domains,
-                                 reloc.write_domain);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->read_domains,
+                                 reloc->write_domain);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
-
-               if (reloc.write_domain && target_obj->pending_write_domain &&
-                   reloc.write_domain != target_obj->pending_write_domain) {
+               if (reloc->write_domain && target_obj->pending_write_domain &&
+                   reloc->write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
                                  "obj %p target %d offset %d "
                                  "new %08x old %08x\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset,
-                                 reloc.write_domain,
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->write_domain,
                                  target_obj->pending_write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
 
-#if WATCH_RELOC
-               DRM_INFO("%s: obj %p offset %08x target %d "
-                        "read %08x write %08x gtt %08x "
-                        "presumed %08x delta %08x\n",
-                        __func__,
-                        obj,
-                        (int) reloc.offset,
-                        (int) reloc.target_handle,
-                        (int) reloc.read_domains,
-                        (int) reloc.write_domain,
-                        (int) target_obj_priv->gtt_offset,
-                        (int) reloc.presumed_offset,
-                        reloc.delta);
-#endif
-
-               target_obj->pending_read_domains |= reloc.read_domains;
-               target_obj->pending_write_domain |= reloc.write_domain;
+               target_obj->pending_read_domains |= reloc->read_domains;
+               target_obj->pending_write_domain |= reloc->write_domain;
 
                /* If the relocation already has the right value in it, no
                 * more work needs to be done.
                 */
-               if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+               if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
                        drm_gem_object_unreference(target_obj);
                        continue;
                }
 
+               /* Check that the relocation address is valid... */
+               if (reloc->offset > obj->size - 4) {
+                       DRM_ERROR("Relocation beyond object bounds: "
+                                 "obj %p target %d offset %d size %d.\n",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset, (int) obj->size);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+               if (reloc->offset & 3) {
+                       DRM_ERROR("Relocation not 4-byte aligned: "
+                                 "obj %p target %d offset %d.\n",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
+               /* and points to somewhere within the target object. */
+               if (reloc->delta >= target_obj->size) {
+                       DRM_ERROR("Relocation beyond target object bounds: "
+                                 "obj %p target %d delta %d size %d.\n",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->delta, (int) target_obj->size);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
                ret = i915_gem_object_set_to_gtt_domain(obj, 1);
                if (ret != 0) {
                        drm_gem_object_unreference(target_obj);
@@ -2304,32 +3310,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                /* Map the page containing the relocation we're going to
                 * perform.
                 */
-               reloc_offset = obj_priv->gtt_offset + reloc.offset;
+               reloc_offset = obj_priv->gtt_offset + reloc->offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      (reloc_offset &
                                                       ~(PAGE_SIZE - 1)));
                reloc_entry = (uint32_t __iomem *)(reloc_page +
                                                   (reloc_offset & (PAGE_SIZE - 1)));
-               reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+               reloc_val = target_obj_priv->gtt_offset + reloc->delta;
 
 #if WATCH_BUF
                DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-                         obj, (unsigned int) reloc.offset,
+                         obj, (unsigned int) reloc->offset,
                          readl(reloc_entry), reloc_val);
 #endif
                writel(reloc_val, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);
 
-               /* Write the updated presumed offset for this entry back out
-                * to the user.
+               /* The updated presumed offset for this entry will be
+                * copied back out to the user.
                 */
-               reloc.presumed_offset = target_obj_priv->gtt_offset;
-               ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
-               if (ret != 0) {
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
+               reloc->presumed_offset = target_obj_priv->gtt_offset;
 
                drm_gem_object_unreference(target_obj);
        }
@@ -2346,32 +3346,25 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
                              struct drm_i915_gem_execbuffer *exec,
+                             struct drm_clip_rect *cliprects,
                              uint64_t exec_offset)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
-                                            (uintptr_t) exec->cliprects_ptr;
        int nbox = exec->num_cliprects;
        int i = 0, count;
-       uint32_t        exec_start, exec_len;
+       uint32_t exec_start, exec_len;
        RING_LOCALS;
 
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;
 
-       if ((exec_start | exec_len) & 0x7) {
-               DRM_ERROR("alignment\n");
-               return -EINVAL;
-       }
-
-       if (!exec_start)
-               return -EINVAL;
+       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
 
        count = nbox ? nbox : 1;
 
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, boxes, i,
+                       int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
@@ -2407,24 +3400,129 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
  * This should get us reasonable parallelism between CPU and GPU but also
  * relatively low latency when blocking on a particular request to finish.
  */
 static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       int ret = 0;
+       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+
+       mutex_lock(&dev->struct_mutex);
+       while (!list_empty(&i915_file_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&i915_file_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          client_list);
+
+               if (time_after_eq(request->emitted_jiffies, recent_enough))
+                       break;
+
+               ret = i915_wait_request(dev, request->seqno);
+               if (ret != 0)
+                       break;
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static int
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+                             uint32_t buffer_count,
+                             struct drm_i915_gem_relocation_entry **relocs)
+{
+       uint32_t reloc_count = 0, reloc_index = 0, i;
+       int ret;
+
+       *relocs = NULL;
+       for (i = 0; i < buffer_count; i++) {
+               if (reloc_count + exec_list[i].relocation_count < reloc_count)
+                       return -EINVAL;
+               reloc_count += exec_list[i].relocation_count;
+       }
+
+       *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
+       if (*relocs == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < buffer_count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+               ret = copy_from_user(&(*relocs)[reloc_index],
+                                    user_relocs,
+                                    exec_list[i].relocation_count *
+                                    sizeof(**relocs));
+               if (ret != 0) {
+                       drm_free_large(*relocs);
+                       *relocs = NULL;
+                       return -EFAULT;
+               }
+
+               reloc_index += exec_list[i].relocation_count;
+       }
+
+       return 0;
+}
+
+static int
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+                           uint32_t buffer_count,
+                           struct drm_i915_gem_relocation_entry *relocs)
+{
+       uint32_t reloc_count = 0, i;
+       int ret = 0;
+
+       for (i = 0; i < buffer_count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+               int unwritten;
+
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+               unwritten = copy_to_user(user_relocs,
+                                        &relocs[reloc_count],
+                                        exec_list[i].relocation_count *
+                                        sizeof(*relocs));
+
+               if (unwritten) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               reloc_count += exec_list[i].relocation_count;
+       }
+
+err:
+       drm_free_large(relocs);
+
+       return ret;
+}
+
+static int
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+                          uint64_t exec_offset)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
-       uint32_t seqno;
+       uint32_t exec_start, exec_len;
 
-       mutex_lock(&dev->struct_mutex);
-       seqno = i915_file_priv->mm.last_gem_throttle_seqno;
-       i915_file_priv->mm.last_gem_throttle_seqno =
-               i915_file_priv->mm.last_gem_seqno;
-       if (seqno)
-               ret = i915_wait_request(dev, seqno);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       exec_len = (uint32_t) exec->batch_len;
+
+       if ((exec_start | exec_len) & 0x7)
+               return -EINVAL;
+
+       if (!exec_start)
+               return -EINVAL;
+
+       return 0;
 }
 
 int
@@ -2432,14 +3530,16 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
-       int ret, i, pinned = 0;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_clip_rect *cliprects = NULL;
+       struct drm_i915_gem_relocation_entry *relocs;
+       int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
-       uint32_t seqno, flush_domains;
+       uint32_t seqno, flush_domains, reloc_index;
        int pin_tries;
 
 #if WATCH_EXEC
@@ -2452,10 +3552,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                return -EINVAL;
        }
        /* Copy in the exec list from userland */
-       exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
-                              DRM_MEM_DRIVER);
-       object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
-                                DRM_MEM_DRIVER);
+       exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
+       object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
        if (exec_list == NULL || object_list == NULL) {
                DRM_ERROR("Failed to allocate exec or object list "
                          "for %d buffers\n",
@@ -2473,20 +3571,44 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
+       if (args->num_cliprects != 0) {
+               cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
+                                   GFP_KERNEL);
+               if (cliprects == NULL)
+                       goto pre_mutex_err;
+
+               ret = copy_from_user(cliprects,
+                                    (struct drm_clip_rect __user *)
+                                    (uintptr_t) args->cliprects_ptr,
+                                    sizeof(*cliprects) * args->num_cliprects);
+               if (ret != 0) {
+                       DRM_ERROR("copy %d cliprects failed: %d\n",
+                                 args->num_cliprects, ret);
+                       goto pre_mutex_err;
+               }
+       }
+
+       ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+                                           &relocs);
+       if (ret != 0)
+               goto pre_mutex_err;
+
        mutex_lock(&dev->struct_mutex);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
-               return -EIO;
+               ret = -EIO;
+               goto pre_mutex_err;
        }
 
        if (dev_priv->mm.suspended) {
                DRM_ERROR("Execbuf while VT-switched.\n");
                mutex_unlock(&dev->struct_mutex);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto pre_mutex_err;
        }
 
        /* Look up object handles */
@@ -2499,29 +3621,57 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                        ret = -EBADF;
                        goto err;
                }
+
+               obj_priv = object_list[i]->driver_private;
+               if (obj_priv->in_execbuffer) {
+                       DRM_ERROR("Object %p appears more than once in object list\n",
+                                  object_list[i]);
+                       ret = -EBADF;
+                       goto err;
+               }
+               obj_priv->in_execbuffer = true;
        }
 
        /* Pin and relocate */
        for (pin_tries = 0; ; pin_tries++) {
                ret = 0;
+               reloc_index = 0;
+
                for (i = 0; i < args->buffer_count; i++) {
                        object_list[i]->pending_read_domains = 0;
                        object_list[i]->pending_write_domain = 0;
                        ret = i915_gem_object_pin_and_relocate(object_list[i],
                                                               file_priv,
-                                                              &exec_list[i]);
+                                                              &exec_list[i],
+                                                              &relocs[reloc_index]);
                        if (ret)
                                break;
                        pinned = i + 1;
+                       reloc_index += exec_list[i].relocation_count;
                }
                /* success */
                if (ret == 0)
                        break;
 
                /* error other than GTT full, or we've already tried again */
-               if (ret != -ENOMEM || pin_tries >= 1) {
-                       if (ret != -ERESTARTSYS)
-                               DRM_ERROR("Failed to pin buffers %d\n", ret);
+               if (ret != -ENOSPC || pin_tries >= 1) {
+                       if (ret != -ERESTARTSYS) {
+                               unsigned long long total_size = 0;
+                               for (i = 0; i < args->buffer_count; i++)
+                                       total_size += object_list[i]->size;
+                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+                                         pinned+1, args->buffer_count,
+                                         total_size, ret);
+                               DRM_ERROR("%d objects [%d pinned], "
+                                         "%d object bytes [%d pinned], "
+                                         "%d/%d gtt bytes\n",
+                                         atomic_read(&dev->object_count),
+                                         atomic_read(&dev->pin_count),
+                                         atomic_read(&dev->object_memory),
+                                         atomic_read(&dev->pin_memory),
+                                         atomic_read(&dev->gtt_memory),
+                                         dev->gtt_total);
+                       }
                        goto err;
                }
 
@@ -2532,14 +3682,26 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
                /* evict everyone we can from the aperture */
                ret = i915_gem_evict_everything(dev);
-               if (ret)
+               if (ret && ret != -ENOSPC)
                        goto err;
        }
 
        /* Set the pending read domains for the batch buffer to COMMAND */
        batch_obj = object_list[args->buffer_count-1];
-       batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-       batch_obj->pending_write_domain = 0;
+       if (batch_obj->pending_write_domain) {
+               DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+               ret = -EINVAL;
+               goto err;
+       }
+       batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+       /* Sanity check the batch buffer, prior to moving objects */
+       exec_offset = exec_list[args->buffer_count - 1].offset;
+       ret = i915_gem_check_execbuffer (args, exec_offset);
+       if (ret != 0) {
+               DRM_ERROR("execbuf with invalid offset/length\n");
+               goto err;
+       }
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -2554,9 +3716,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                struct drm_gem_object *obj = object_list[i];
 
                /* Compute new gpu domains and update invalidate/flush */
-               i915_gem_object_set_to_gpu_domain(obj,
-                                                 obj->pending_read_domains,
-                                                 obj->pending_write_domain);
+               i915_gem_object_set_to_gpu_domain(obj);
        }
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2572,7 +3732,18 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                               dev->invalidate_domains,
                               dev->flush_domains);
                if (dev->flush_domains)
-                       (void)i915_add_request(dev, dev->flush_domains);
+                       (void)i915_add_request(dev, file_priv,
+                                              dev->flush_domains);
+       }
+
+       for (i = 0; i < args->buffer_count; i++) {
+               struct drm_gem_object *obj = object_list[i];
+               uint32_t old_write_domain = obj->write_domain;
+
+               obj->write_domain = obj->pending_write_domain;
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   old_write_domain);
        }
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2584,17 +3755,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        }
 #endif
 
-       exec_offset = exec_list[args->buffer_count - 1].offset;
-
 #if WATCH_EXEC
-       i915_gem_dump_object(object_list[args->buffer_count - 1],
+       i915_gem_dump_object(batch_obj,
                              args->batch_len,
                              __func__,
                              ~0);
 #endif
 
        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -2615,9 +3784,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
         * *some* interrupts representing completion of buffers that we can
         * wait on when trying to clear up gtt space).
         */
-       seqno = i915_add_request(dev, flush_domains);
+       seqno = i915_add_request(dev, file_priv, flush_domains);
        BUG_ON(seqno == 0);
-       i915_file_priv->mm.last_gem_seqno = seqno;
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
 
@@ -2632,29 +3800,52 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       /* Copy the new buffer offsets back to the user's exec list. */
-       ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-                          (uintptr_t) args->buffers_ptr,
-                          exec_list,
-                          sizeof(*exec_list) * args->buffer_count);
-       if (ret)
-               DRM_ERROR("failed to copy %d exec entries "
-                         "back to user (%d)\n",
-                          args->buffer_count, ret);
 err:
        for (i = 0; i < pinned; i++)
                i915_gem_object_unpin(object_list[i]);
 
-       for (i = 0; i < args->buffer_count; i++)
+       for (i = 0; i < args->buffer_count; i++) {
+               if (object_list[i]) {
+                       obj_priv = object_list[i]->driver_private;
+                       obj_priv->in_execbuffer = false;
+               }
                drm_gem_object_unreference(object_list[i]);
+       }
 
        mutex_unlock(&dev->struct_mutex);
 
+       if (!ret) {
+               /* Copy the new buffer offsets back to the user's exec list. */
+               ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                                  (uintptr_t) args->buffers_ptr,
+                                  exec_list,
+                                  sizeof(*exec_list) * args->buffer_count);
+               if (ret) {
+                       ret = -EFAULT;
+                       DRM_ERROR("failed to copy %d exec entries "
+                                 "back to user (%d)\n",
+                                 args->buffer_count, ret);
+               }
+       }
+
+       /* Copy the updated relocations out regardless of current error
+        * state.  Failure to update the relocs would mean that the next
+        * time userland calls execbuf, it would do so with presumed offset
+        * state that didn't match the actual object state.
+        */
+       ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+                                          relocs);
+       if (ret2 != 0) {
+               DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+               if (ret == 0)
+                       ret = ret2;
+       }
+
 pre_mutex_err:
-       drm_free(object_list, sizeof(*object_list) * args->buffer_count,
-                DRM_MEM_DRIVER);
-       drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
-                DRM_MEM_DRIVER);
+       drm_free_large(object_list);
+       drm_free_large(exec_list);
+       kfree(cliprects);
 
        return ret;
 }
@@ -2669,19 +3860,21 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
+               if (ret)
+                       return ret;
+       }
+       /*
+        * Pre-965 chips need a fence register set up in order to
+        * properly handle tiled surfaces.
+        */
+       if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
                        if (ret != -EBUSY && ret != -ERESTARTSYS)
-                               DRM_ERROR("Failure to bind: %d", ret);
+                               DRM_ERROR("Failure to install fence: %d\n",
+                                         ret);
                        return ret;
                }
-               /*
-                * Pre-965 chips need a fence register set up in order to
-                * properly handle tiled surfaces.
-                */
-               if (!IS_I965G(dev) &&
-                   obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-                   obj_priv->tiling_mode != I915_TILING_NONE)
-                       i915_gem_object_get_fence_reg(obj, true);
        }
        obj_priv->pin_count++;
 
@@ -2692,8 +3885,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                atomic_inc(&dev->pin_count);
                atomic_add(obj->size, &dev->pin_memory);
                if (!obj_priv->active &&
-                   (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-                                          I915_GEM_DOMAIN_GTT)) == 0 &&
+                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
                    !list_empty(&obj_priv->list))
                        list_del_init(&obj_priv->list);
        }
@@ -2720,8 +3912,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
         */
        if (obj_priv->pin_count == 0) {
                if (!obj_priv->active &&
-                   (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-                                          I915_GEM_DOMAIN_GTT)) == 0)
+                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.inactive_list);
                atomic_dec(&dev->pin_count);
@@ -2750,9 +3941,17 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
        obj_priv = obj->driver_private;
 
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to pin a purgeable buffer\n");
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
        if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
+               drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
@@ -2824,15 +4023,21 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
 
-       mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }
 
+       mutex_lock(&dev->struct_mutex);
+       /* Update the active list for the hardware's current position.
+        * Otherwise this only updates on a delayed timer or when irqs are
+        * actually unmasked, and our working set ends up being larger than
+        * required.
+        */
+       i915_gem_retire_requests(dev);
+
        obj_priv = obj->driver_private;
        /* Don't count being on the flushing list against the object being
         * done.  Otherwise, a buffer left on the flushing list but not getting
@@ -2855,11 +4060,61 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
     return i915_gem_ring_throttle(dev, file_priv);
 }
 
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_i915_gem_madvise *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       switch (args->madv) {
+       case I915_MADV_DONTNEED:
+       case I915_MADV_WILLNEED:
+           break;
+       default:
+           return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+                         args->handle);
+               return -EBADF;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       obj_priv = obj->driver_private;
+
+       if (obj_priv->pin_count) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+
+               DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+               return -EINVAL;
+       }
+
+       if (obj_priv->madv != __I915_MADV_PURGED)
+               obj_priv->madv = args->madv;
+
+       /* if the object is no longer bound, discard its backing storage */
+       if (i915_gem_object_is_purgeable(obj_priv) &&
+           obj_priv->gtt_space == NULL)
+               i915_gem_object_truncate(obj);
+
+       args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv;
 
-       obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+       obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
        if (obj_priv == NULL)
                return -ENOMEM;
 
@@ -2878,6 +4133,10 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+       INIT_LIST_HEAD(&obj_priv->fence_list);
+       obj_priv->madv = I915_MADV_WILLNEED;
+
+       trace_i915_gem_object_create(obj);
 
        return 0;
 }
@@ -2885,11 +4144,10 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
-       struct drm_map *map;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+       trace_i915_gem_object_destroy(obj);
+
        while (obj_priv->pin_count > 0)
                i915_gem_object_unpin(obj);
 
@@ -2898,58 +4156,39 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
        i915_gem_object_unbind(obj);
 
-       list = &obj->map_list;
-       drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-       if (list->file_offset_node) {
-               drm_mm_put_block(list->file_offset_node);
-               list->file_offset_node = NULL;
-       }
-
-       map = list->map;
-       if (map) {
-               drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-               list->map = NULL;
-       }
+       if (obj_priv->mmap_offset)
+               i915_gem_free_mmap_offset(obj);
 
-       drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
-       drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+       kfree(obj_priv->page_cpu_valid);
+       kfree(obj_priv->bit_17);
+       kfree(obj->driver_private);
 }
 
-/** Unbinds all objects that are on the given buffer list. */
+/** Unbinds all inactive objects. */
 static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
 {
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       while (!list_empty(head)) {
-               obj_priv = list_first_entry(head,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj = obj_priv->obj;
+       while (!list_empty(&dev_priv->mm.inactive_list)) {
+               struct drm_gem_object *obj;
+               int ret;
 
-               if (obj_priv->pin_count != 0) {
-                       DRM_ERROR("Pinned object in unbind list\n");
-                       mutex_unlock(&dev->struct_mutex);
-                       return -EINVAL;
-               }
+               obj = list_first_entry(&dev_priv->mm.inactive_list,
+                                      struct drm_i915_gem_object,
+                                      list)->obj;
 
                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
-                       DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
-                                 ret);
-                       mutex_unlock(&dev->struct_mutex);
+                       DRM_ERROR("Error unbinding object: %d\n", ret);
                        return ret;
                }
        }
 
-
        return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2967,6 +4206,7 @@ i915_gem_idle(struct drm_device *dev)
         * We need to replace this with a semaphore, or something.
         */
        dev_priv->mm.suspended = 1;
+       del_timer(&dev_priv->hangcheck_timer);
 
        /* Cancel the retire work handler, wait for it to finish if running
         */
@@ -2978,9 +4218,8 @@ i915_gem_idle(struct drm_device *dev)
 
        /* Flush the GPU along with all non-CPU write domains
         */
-       i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
-                      ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-       seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
+       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
 
        if (seqno == 0) {
                mutex_unlock(&dev->struct_mutex);
@@ -2997,7 +4236,7 @@ i915_gem_idle(struct drm_device *dev)
                if (last_seqno == cur_seqno) {
                        if (stuck++ > 100) {
                                DRM_ERROR("hardware wedged\n");
-                               dev_priv->mm.wedged = 1;
+                               atomic_set(&dev_priv->mm.wedged, 1);
                                DRM_WAKEUP(&dev_priv->irq_queue);
                                break;
                        }
@@ -3009,7 +4248,8 @@ i915_gem_idle(struct drm_device *dev)
 
        i915_gem_retire_requests(dev);
 
-       if (!dev_priv->mm.wedged) {
+       spin_lock(&dev_priv->mm.active_list_lock);
+       if (!atomic_read(&dev_priv->mm.wedged)) {
                /* Active and flushing should now be empty as we've
                 * waited for a sequence higher than any pending execbuffer
                 */
@@ -3027,28 +4267,41 @@ i915_gem_idle(struct drm_device *dev)
         * the GPU domains and just stuff them onto inactive.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
-               struct drm_i915_gem_object *obj_priv;
-
-               obj_priv = list_first_entry(&dev_priv->mm.active_list,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-               i915_gem_object_move_to_inactive(obj_priv->obj);
+               struct drm_gem_object *obj;
+               uint32_t old_write_domain;
+
+               obj = list_first_entry(&dev_priv->mm.active_list,
+                                      struct drm_i915_gem_object,
+                                      list)->obj;
+               old_write_domain = obj->write_domain;
+               obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+               i915_gem_object_move_to_inactive(obj);
+
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   old_write_domain);
        }
+       spin_unlock(&dev_priv->mm.active_list_lock);
 
        while (!list_empty(&dev_priv->mm.flushing_list)) {
-               struct drm_i915_gem_object *obj_priv;
-
-               obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-               i915_gem_object_move_to_inactive(obj_priv->obj);
+               struct drm_gem_object *obj;
+               uint32_t old_write_domain;
+
+               obj = list_first_entry(&dev_priv->mm.flushing_list,
+                                      struct drm_i915_gem_object,
+                                      list)->obj;
+               old_write_domain = obj->write_domain;
+               obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+               i915_gem_object_move_to_inactive(obj);
+
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   old_write_domain);
        }
 
 
        /* Move all inactive buffers out of the GTT. */
-       ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+       ret = i915_gem_evict_from_inactive_list(dev);
        WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
@@ -3091,10 +4344,11 @@ i915_gem_init_hws(struct drm_device *dev)
 
        dev_priv->status_gfx_addr = obj_priv->gtt_offset;
 
-       dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }
@@ -3107,6 +4361,31 @@ i915_gem_init_hws(struct drm_device *dev)
        return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       if (dev_priv->hws_obj == NULL)
+               return;
+
+       obj = dev_priv->hws_obj;
+       obj_priv = obj->driver_private;
+
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->hws_obj = NULL;
+
+       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+       dev_priv->hw_status_page = NULL;
+
+       /* Write high address into HWS_PGA when disabling. */
+       I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3124,6 +4403,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        obj = drm_gem_object_alloc(dev, 128 * 1024);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
+               i915_gem_cleanup_hws(dev);
                return -ENOMEM;
        }
        obj_priv = obj->driver_private;
@@ -3131,12 +4411,12 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
+               i915_gem_cleanup_hws(dev);
                return ret;
        }
 
        /* Set up the kernel mapping for the ring. */
        ring->Size = obj->size;
-       ring->tail_mask = obj->size - 1;
 
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.size = obj->size;
@@ -3148,7 +4428,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+               i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
+               i915_gem_cleanup_hws(dev);
                return -EINVAL;
        }
        ring->ring_obj = obj;
@@ -3228,20 +4510,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        dev_priv->ring.ring_obj = NULL;
        memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-       if (dev_priv->hws_obj != NULL) {
-               struct drm_gem_object *obj = dev_priv->hws_obj;
-               struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-               kunmap(obj_priv->page_list[0]);
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               dev_priv->hws_obj = NULL;
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               dev_priv->hw_status_page = NULL;
-
-               /* Write high address into HWS_PGA when disabling. */
-               I915_WRITE(HWS_PGA, 0x1ffff000);
-       }
+       i915_gem_cleanup_hws(dev);
 }
 
 int
@@ -3254,19 +4523,24 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               dev_priv->mm.wedged = 0;
+               atomic_set(&dev_priv->mm.wedged, 0);
        }
 
        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.suspended = 0;
 
        ret = i915_gem_init_ringbuffer(dev);
-       if (ret != 0)
+       if (ret != 0) {
+               mutex_unlock(&dev->struct_mutex);
                return ret;
+       }
 
+       spin_lock(&dev_priv->mm.active_list_lock);
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -3281,15 +4555,11 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       int ret;
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
-
-       return ret;
+       return i915_gem_idle(dev);
 }
 
 void
@@ -3308,16 +4578,23 @@ i915_gem_lastclose(struct drm_device *dev)
 void
 i915_gem_load(struct drm_device *dev)
 {
+       int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
+       spin_lock_init(&dev_priv->mm.active_list_lock);
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
 
+       spin_lock(&shrink_list_lock);
+       list_add(&dev_priv->mm.shrink_list, &shrink_list);
+       spin_unlock(&shrink_list_lock);
+
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
@@ -3326,6 +4603,18 @@ i915_gem_load(struct drm_device *dev)
        else
                dev_priv->num_fence_regs = 8;
 
+       /* Initialize fence registers to zero */
+       if (IS_I965G(dev)) {
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+       } else {
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+       }
+
        i915_gem_detect_bit_6_swizzle(dev);
 }
 
@@ -3343,7 +4632,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;
 
-       phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;
 
@@ -3362,7 +4651,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 
        return 0;
 kfree_obj:
-       drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       kfree(phys_obj);
        return ret;
 }
 
@@ -3407,21 +4696,23 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        if (!obj_priv->phys_obj)
                return;
 
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto out;
 
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(dst, KM_USER0);
        }
-       drm_clflush_pages(obj_priv->page_list, page_count);
+       drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
+
+       i915_gem_object_put_pages(obj);
 out:
        obj_priv->phys_obj->cur_obj = NULL;
        obj_priv->phys_obj = NULL;
@@ -3463,7 +4754,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;
 
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                DRM_ERROR("failed to get page list\n");
                goto out;
@@ -3472,13 +4763,15 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(src, KM_USER0);
        }
 
+       i915_gem_object_put_pages(obj);
+
        return 0;
 out:
        return ret;
@@ -3497,7 +4790,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-       DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
+       DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
        ret = copy_from_user(obj_addr, user_data, args->size);
        if (ret)
                return -EFAULT;
@@ -3505,3 +4798,130 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        drm_agp_chipset_flush(dev);
        return 0;
 }
+
+void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+{
+       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+       /* Clean up our request list when the client is going away, so that
+        * later retire_requests won't dereference our soon-to-be-gone
+        * file_priv.
+        */
+       mutex_lock(&dev->struct_mutex);
+       while (!list_empty(&i915_file_priv->mm.request_list))
+               list_del_init(i915_file_priv->mm.request_list.next);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       drm_i915_private_t *dev_priv, *next_dev;
+       struct drm_i915_gem_object *obj_priv, *next_obj;
+       int cnt = 0;
+       int would_deadlock = 1;
+
+       /* "fast-path" to count number of available objects */
+       if (nr_to_scan == 0) {
+               spin_lock(&shrink_list_lock);
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (mutex_trylock(&dev->struct_mutex)) {
+                               list_for_each_entry(obj_priv,
+                                                   &dev_priv->mm.inactive_list,
+                                                   list)
+                                       cnt++;
+                               mutex_unlock(&dev->struct_mutex);
+                       }
+               }
+               spin_unlock(&shrink_list_lock);
+
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       }
+
+       spin_lock(&shrink_list_lock);
+
+       /* first scan for clean buffers */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (! mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               i915_gem_retire_requests(dev);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (i915_gem_object_is_purgeable(obj_priv)) {
+                               i915_gem_object_unbind(obj_priv->obj);
+                               if (--nr_to_scan <= 0)
+                                       break;
+                       }
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+
+               if (nr_to_scan <= 0)
+                       break;
+       }
+
+       /* second pass, evict/count anything still on the inactive list */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (! mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (nr_to_scan > 0) {
+                               i915_gem_object_unbind(obj_priv->obj);
+                               nr_to_scan--;
+                       } else
+                               cnt++;
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+       }
+
+       spin_unlock(&shrink_list_lock);
+
+       if (would_deadlock)
+               return -1;
+       else if (cnt > 0)
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       else
+               return 0;
+}
+
+static struct shrinker shrinker = {
+       .shrink = i915_gem_shrink,
+       .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+    register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+    unregister_shrinker(&shrinker);
+}