tree-wide: fix assorted typos all over the place
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c0ae6bb..a2a3fa5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,8 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
 #include <linux/swap.h>
 #include <linux/pci.h>
 
@@ -46,13 +48,16 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
-static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
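/*
 * Aside (not part of this patch): shrink_list and shrink_list_lock exist
 * to feed a memory shrinker, so the VM can ask GEM to give pages back
 * under pressure.  A minimal sketch of how such a shrinker is wired up
 * with the shrinker API of this kernel era; i915_gem_shrink() and the
 * count_purgeable_objects() helper are illustrative, not the actual
 * implementation:
 */
static int i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	/* nr_to_scan == 0 is a query: report how many objects we could free */
	if (nr_to_scan == 0)
		return count_purgeable_objects();	/* hypothetical helper */

	spin_lock(&shrink_list_lock);
	/* walk shrink_list, unbinding/purging up to nr_to_scan objects */
	spin_unlock(&shrink_list_lock);

	return count_purgeable_objects();
}

static struct shrinker i915_gem_shrinker = {
	.shrink	= i915_gem_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&i915_gem_shrinker) at load time,
 * unregister_shrinker(&i915_gem_shrinker) at unload. */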
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
 {
@@ -112,7 +117,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
-       int handle, ret;
+       int ret;
+       u32 handle;
 
        args->size = roundup(args->size, PAGE_SIZE);
 
@@ -315,6 +321,45 @@ fail_unlock:
        return ret;
 }
 
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
+{
+       return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
+{
+       mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+       int ret;
+
+       ret = i915_gem_object_get_pages(obj);
+
+       /* If we have insufficient memory to map in the pages, attempt
+        * to make some space by throwing out some old buffers.
+        */
+       if (ret == -ENOMEM) {
+               struct drm_device *dev = obj->dev;
+               gfp_t gfp;
+
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
+                       return ret;
+
+               gfp = i915_gem_object_get_page_gfp_mask(obj);
+               i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+               ret = i915_gem_object_get_pages(obj);
+               i915_gem_object_set_page_gfp_mask(obj, gfp);
+       }
+
+       return ret;
+}
+
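/*
 * Aside (not part of this patch): the helper above relies on the
 * mapping's gfp mask carrying __GFP_NORETRY, so the first
 * i915_gem_object_get_pages() call fails fast with -ENOMEM instead of
 * looping in the allocator or waking the OOM killer.  Only after
 * i915_gem_evict_something() has freed GTT space does it clear the
 * flag, retry the allocation with full reclaim effort, and restore the
 * original mask.
 */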
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -366,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_get_pages(obj);
-       if (ret != 0)
+       ret = i915_gem_object_get_pages_or_evict(obj);
+       if (ret)
                goto fail_unlock;
 
        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -841,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_get_pages(obj);
-       if (ret != 0)
+       ret = i915_gem_object_get_pages_or_evict(obj);
+       if (ret)
                goto fail_unlock;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -979,8 +1024,10 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;
@@ -1004,15 +1051,27 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
+       obj_priv = obj->driver_private;
 
        mutex_lock(&dev->struct_mutex);
+
+       intel_mark_busy(dev, obj);
+
 #if WATCH_BUF
-       DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+       DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
 #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+               /* Update the LRU on the fence for the CPU access that's
+                * about to occur.
+                */
+               if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                       list_move_tail(&obj_priv->fence_list,
+                                      &dev_priv->mm.fence_list);
+               }
+
                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
@@ -1051,7 +1110,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }
 
 #if WATCH_BUF
-       DRM_INFO("%s: sw_finish %d (%p %d)\n",
+       DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
 #endif
        obj_priv = obj->driver_private;
@@ -1140,29 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
-
-               ret = i915_gem_object_set_to_gtt_domain(obj, write);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
+               if (ret)
+                       goto unlock;
 
                list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+               ret = i915_gem_object_set_to_gtt_domain(obj, write);
+               if (ret)
+                       goto unlock;
        }
 
        /* Need a new fence register? */
-       if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj, write);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return VM_FAULT_SIGBUS;
-               }
+       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj);
+               if (ret)
+                       goto unlock;
        }
 
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1170,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
        mutex_unlock(&dev->struct_mutex);
 
        switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+               return VM_FAULT_NOPAGE;
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
-       case -EFAULT:
-       case -EINVAL:
-               return VM_FAULT_SIGBUS;
        default:
-               return VM_FAULT_NOPAGE;
+               return VM_FAULT_SIGBUS;
        }
 }
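/*
 * Aside (not part of this patch): the reworked switch inverts the old
 * default.  0 and -ERESTARTSYS (a signal arrived while we held the
 * mutex) map to VM_FAULT_NOPAGE, so the CPU re-executes the faulting
 * instruction and we simply fault again; -ENOMEM and -EAGAIN map to
 * VM_FAULT_OOM; and any other error is now reported as VM_FAULT_SIGBUS
 * instead of being silently treated as success.
 */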
 
@@ -1208,8 +1260,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 
        /* Set the object up for mmap'ing */
        list = &obj->map_list;
-       list->map = drm_calloc(1, sizeof(struct drm_map_list),
-                              DRM_MEM_DRIVER);
+       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;
 
@@ -1249,11 +1300,36 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 out_free_mm:
        drm_mm_put_block(list->file_offset_node);
 out_free_list:
-       drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+       kfree(list->map);
 
        return ret;
 }
 
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmap offset with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (dev->dev_mapping)
+               unmap_mapping_range(dev->dev_mapping,
+                                   obj_priv->mmap_offset, obj->size, 1);
+}
+
 static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
@@ -1271,7 +1347,7 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
        }
 
        if (list->map) {
-               drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+               kfree(list->map);
                list->map = NULL;
        }
 
@@ -1350,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        obj_priv = obj->driver_private;
 
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
                if (ret) {
@@ -1361,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        args->offset = obj_priv->mmap_offset;
 
-       obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
-       /* Make sure the alignment is correct for fence regs etc */
-       if (obj_priv->agp_mem &&
-           (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
@@ -1399,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
        int i;
 
        BUG_ON(obj_priv->pages_refcount == 0);
+       BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
 
        if (--obj_priv->pages_refcount != 0)
                return;
@@ -1406,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
        if (obj_priv->tiling_mode != I915_TILING_NONE)
                i915_gem_object_save_bit_17_swizzle(obj);
 
-       for (i = 0; i < page_count; i++)
-               if (obj_priv->pages[i] != NULL) {
-                       if (obj_priv->dirty)
-                               set_page_dirty(obj_priv->pages[i]);
+       if (obj_priv->madv == I915_MADV_DONTNEED)
+               obj_priv->dirty = 0;
+
+       for (i = 0; i < page_count; i++) {
+               if (obj_priv->pages[i] == NULL)
+                       break;
+
+               if (obj_priv->dirty)
+                       set_page_dirty(obj_priv->pages[i]);
+
+               if (obj_priv->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(obj_priv->pages[i]);
-                       page_cache_release(obj_priv->pages[i]);
-               }
+
+               page_cache_release(obj_priv->pages[i]);
+       }
        obj_priv->dirty = 0;
 
        drm_free_large(obj_priv->pages);
@@ -1451,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
        obj_priv->last_rendering_seqno = 0;
 }
 
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct inode *inode;
+
+       inode = obj->filp->f_path.dentry->d_inode;
+       if (inode->i_op->truncate)
+               inode->i_op->truncate(inode);
+
+       obj_priv->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+       return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
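/*
 * Aside (not part of this patch): the madv bookkeeping above is driven
 * from userspace.  A hypothetical sketch of how a client might mark an
 * idle cached buffer purgeable and later reclaim it, assuming libdrm's
 * drmIoctl() wrapper and the I915_GEM_MADVISE ioctl this bookkeeping is
 * built for (field names follow the i915_drm.h of the same era):
 */
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int mark_purgeable(int fd, uint32_t handle, int purgeable)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = purgeable ? I915_MADV_DONTNEED : I915_MADV_WILLNEED,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	/* retained == 0 means the kernel already purged the pages: the
	 * old contents are gone and the buffer must be repainted. */
	return madv.retained;
}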
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
@@ -1494,7 +1597,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (file_priv != NULL)
                i915_file_priv = file_priv->driver_priv;
 
-       request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
                return 0;
 
@@ -1539,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 
                        if ((obj->write_domain & flush_domains) ==
                            obj->write_domain) {
+                               uint32_t old_write_domain = obj->write_domain;
+
                                obj->write_domain = 0;
                                i915_gem_object_move_to_active(obj, seqno);
+
+                               trace_i915_gem_object_change_domain(obj,
+                                                                   obj->read_domains,
+                                                                   old_write_domain);
                        }
                }
 
        }
 
-       if (was_empty && !dev_priv->mm.suspended)
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+       if (!dev_priv->mm.suspended) {
+               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               if (was_empty)
+                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+       }
        return seqno;
 }
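/*
 * Aside (not part of this patch): the trace_i915_gem_* calls sprinkled
 * through this file come from the newly included i915_trace.h.  A
 * minimal sketch of how one such tracepoint could be declared with
 * TRACE_EVENT(); the field layout is illustrative (and the usual
 * TRACE_SYSTEM/#include boilerplate is omitted):
 */
TRACE_EVENT(i915_gem_object_change_domain,
	TP_PROTO(struct drm_gem_object *obj, u32 old_read, u32 old_write),
	TP_ARGS(obj, old_read, old_write),

	TP_STRUCT__entry(
		__field(struct drm_gem_object *, obj)
		__field(u32, old_read)
		__field(u32, old_write)
		__field(u32, read_domains)
		__field(u32, write_domain)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->old_read = old_read;
		__entry->old_write = old_write;
		__entry->read_domains = obj->read_domains;
		__entry->write_domain = obj->write_domain;
	),

	TP_printk("obj=%p, read %04x=>%04x, write %04x=>%04x",
		  __entry->obj, __entry->old_read, __entry->read_domains,
		  __entry->old_write, __entry->write_domain)
);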
 
@@ -1585,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
+       trace_i915_gem_request_retire(dev, request->seqno);
+
        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
@@ -1633,7 +1747,7 @@ out:
 /**
  * Returns true if seq1 is later than seq2.
  */
-static int
+bool
 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 {
        return (int32_t)(seq1 - seq2) >= 0;
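/*
 * Aside (not part of this patch): a quick illustration of why the
 * signed-difference test tolerates 32-bit seqno wraparound, as long as
 * the two values are within 2^31 of each other:
 *
 *	i915_seqno_passed(5, 3)          -> (int32_t)2          >= 0: true
 *	i915_seqno_passed(3, 5)          -> (int32_t)0xfffffffe  < 0: false
 *	i915_seqno_passed(2, 0xfffffffe) -> (int32_t)4          >= 0: true
 *
 * A plain "seq1 >= seq2" would get the last case wrong once the ring's
 * sequence counter wraps past zero.
 */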
@@ -1656,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!dev_priv->hw_status_page)
+       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
                return;
 
        seqno = i915_get_gem_seqno(dev);
@@ -1671,15 +1785,21 @@ i915_gem_retire_requests(struct drm_device *dev)
                retiring_seqno = request->seqno;
 
                if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   dev_priv->mm.wedged) {
+                   atomic_read(&dev_priv->mm.wedged)) {
                        i915_gem_retire_request(dev, request);
 
                        list_del(&request->list);
                        list_del(&request->client_list);
-                       drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+                       kfree(request);
                } else
                        break;
        }
+
+       if (unlikely(dev_priv->trace_irq_seqno &&
+                    i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+               i915_user_irq_put(dev);
+               dev_priv->trace_irq_seqno = 0;
+       }
 }
 
 void
@@ -1696,7 +1816,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -1713,6 +1833,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EIO;
+
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                if (IS_IGDNG(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1725,16 +1848,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
                        i915_driver_irq_postinstall(dev);
                }
 
+               trace_i915_gem_request_wait_begin(dev, seqno);
+
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
-                                              dev_priv->mm.wedged);
+                                              atomic_read(&dev_priv->mm.wedged));
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
+
+               trace_i915_gem_request_wait_end(dev, seqno);
        }
-       if (dev_priv->mm.wedged)
+       if (atomic_read(&dev_priv->mm.wedged))
                ret = -EIO;
 
        if (ret && ret != -ERESTARTSYS)
@@ -1765,6 +1892,8 @@ i915_gem_flush(struct drm_device *dev,
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
 #endif
+       trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+                                    invalidate_domains, flush_domains);
 
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
@@ -1863,7 +1992,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-       loff_t offset;
        int ret = 0;
 
 #if WATCH_BUF
@@ -1878,6 +2006,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                return -EINVAL;
        }
 
+       /* blow away mappings if mapped through GTT */
+       i915_gem_release_mmap(obj);
+
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+
        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
@@ -1891,23 +2025,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                return ret;
        }
 
+       BUG_ON(obj_priv->active);
+
        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }
 
-       BUG_ON(obj_priv->active);
-
-       /* blow away mappings if mapped through GTT */
-       offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
-       if (dev->dev_mapping)
-               unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
-
-       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-               i915_gem_clear_fence_reg(obj);
-
        i915_gem_object_put_pages(obj);
+       BUG_ON(obj_priv->pages_refcount);
 
        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
@@ -1921,40 +2048,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);
 
+       if (i915_gem_object_is_purgeable(obj_priv))
+               i915_gem_object_truncate(obj);
+
+       trace_i915_gem_object_unbind(obj);
+
+       return 0;
+}
+
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *best = NULL;
+       struct drm_gem_object *first = NULL;
+
+       /* Try to find the smallest clean object */
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->size >= min_size) {
+                       if ((!obj_priv->dirty ||
+                            i915_gem_object_is_purgeable(obj_priv)) &&
+                           (!best || obj->size < best->size)) {
+                               best = obj;
+                               if (best->size == min_size)
+                                       return best;
+                       }
+                       if (!first)
+                               first = obj;
+               }
+       }
+
+       return best ? best : first;
+}
+
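/*
 * Aside (not part of this patch): this is a best-fit scan.  Among the
 * inactive buffers large enough for the request it prefers the smallest
 * one that can be dropped without writeback (clean or purgeable), and
 * returns early on an exact size match; if every big-enough buffer is
 * dirty it settles for the first of those rather than returning nothing.
 */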
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t seqno;
+       int ret;
+       bool lists_empty;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+                      list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->mm.active_list));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
+       if (lists_empty)
+               return -ENOSPC;
+
+       /* Flush everything (onto the inactive lists) and evict */
+       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+       if (seqno == 0)
+               return -ENOMEM;
+
+       ret = i915_wait_request(dev, seqno);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_evict_from_inactive_list(dev);
+       if (ret)
+               return ret;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+                      list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->mm.active_list));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+       BUG_ON(!lists_empty);
+
        return 0;
 }
 
 static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_something(struct drm_device *dev, int min_size)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret = 0;
+       int ret;
 
        for (;;) {
+               i915_gem_retire_requests(dev);
+
                /* If there's an inactive buffer available now, grab it
                 * and be done.
                 */
-               if (!list_empty(&dev_priv->mm.inactive_list)) {
-                       obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
-                                                   struct drm_i915_gem_object,
-                                                   list);
-                       obj = obj_priv->obj;
-                       BUG_ON(obj_priv->pin_count != 0);
+               obj = i915_gem_find_inactive_object(dev, min_size);
+               if (obj) {
+                       struct drm_i915_gem_object *obj_priv;
+
 #if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
+                       obj_priv = obj->driver_private;
+                       BUG_ON(obj_priv->pin_count != 0);
                        BUG_ON(obj_priv->active);
 
                        /* Wait on the rendering and unbind the buffer. */
-                       ret = i915_gem_object_unbind(obj);
-                       break;
+                       return i915_gem_object_unbind(obj);
                }
 
                /* If we didn't get anything, but the ring is still processing
-                * things, wait for one of those things to finish and hopefully
-                * leave us a buffer to evict.
+                * things, wait for the next to finish and hopefully leave us
+                * a buffer to evict.
                 */
                if (!list_empty(&dev_priv->mm.request_list)) {
                        struct drm_i915_gem_request *request;
@@ -1965,16 +2165,9 @@ i915_gem_evict_something(struct drm_device *dev)
 
                        ret = i915_wait_request(dev, request->seqno);
                        if (ret)
-                               break;
+                               return ret;
 
-                       /* if waiting caused an object to become inactive,
-                        * then loop around and wait for it. Otherwise, we
-                        * assume that waiting freed and unbound something,
-                        * so there should now be some space in the GTT
-                        */
-                       if (!list_empty(&dev_priv->mm.inactive_list))
-                               continue;
-                       break;
+                       continue;
                }
 
                /* If we didn't have anything on the request list but there
@@ -1983,46 +2176,44 @@ i915_gem_evict_something(struct drm_device *dev)
                 * will get moved to inactive.
                 */
                if (!list_empty(&dev_priv->mm.flushing_list)) {
-                       obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-                                                   struct drm_i915_gem_object,
-                                                   list);
-                       obj = obj_priv->obj;
+                       struct drm_i915_gem_object *obj_priv;
 
-                       i915_gem_flush(dev,
-                                      obj->write_domain,
-                                      obj->write_domain);
-                       i915_add_request(dev, NULL, obj->write_domain);
+                       /* Find an object that we can immediately reuse */
+                       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+                               obj = obj_priv->obj;
+                               if (obj->size >= min_size)
+                                       break;
 
-                       obj = NULL;
-                       continue;
-               }
+                               obj = NULL;
+                       }
 
-               DRM_ERROR("inactive empty %d request empty %d "
-                         "flushing empty %d\n",
-                         list_empty(&dev_priv->mm.inactive_list),
-                         list_empty(&dev_priv->mm.request_list),
-                         list_empty(&dev_priv->mm.flushing_list));
-               /* If we didn't do any of the above, there's nothing to be done
-                * and we just can't fit it in.
-                */
-               return -ENOSPC;
-       }
-       return ret;
-}
+                       if (obj != NULL) {
+                               uint32_t seqno;
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-       int ret;
+                               i915_gem_flush(dev,
+                                              obj->write_domain,
+                                              obj->write_domain);
+                               seqno = i915_add_request(dev, NULL, obj->write_domain);
+                               if (seqno == 0)
+                                       return -ENOMEM;
 
-       for (;;) {
-               ret = i915_gem_evict_something(dev);
-               if (ret != 0)
-                       break;
+                               ret = i915_wait_request(dev, seqno);
+                               if (ret)
+                                       return ret;
+
+                               continue;
+                       }
+               }
+
+               /* If we didn't do any of the above, there's no single buffer
+                * large enough to swap out for the new one, so just evict
+                * everything and start again. (This should be rare.)
+                */
+               if (!list_empty(&dev_priv->mm.inactive_list))
+                       return i915_gem_evict_from_inactive_list(dev);
+               else
+                       return i915_gem_evict_everything(dev);
        }
-       if (ret == -ENOSPC)
-               return 0;
-       return ret;
 }
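/*
 * Aside (not part of this patch): the rewritten loop escalates in order
 * of cost.  Each pass (1) retires finished requests, (2) unbinds the
 * best-fitting inactive buffer if one exists, (3) otherwise waits for
 * the oldest outstanding request and retries, (4) otherwise flushes a
 * large-enough buffer off the flushing list and waits for that, and
 * only when nothing suitable remains (5) falls back to emptying the
 * inactive list or, failing that, evicting everything.
 */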
 
 int
@@ -2045,7 +2236,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
        BUG_ON(obj_priv->pages != NULL);
        obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
        if (obj_priv->pages == NULL) {
-               DRM_ERROR("Faled to allocate page list\n");
                obj_priv->pages_refcount--;
                return -ENOMEM;
        }
@@ -2056,7 +2246,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
                page = read_mapping_page(mapping, i, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
-                       DRM_ERROR("read_mapping_page failed: %d\n", ret);
                        i915_gem_object_put_pages(obj);
                        return ret;
                }
@@ -2163,13 +2352,11 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        val |= I830_FENCE_REG_VALID;
 
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
-
 }
 
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
- * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -2180,8 +2367,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
+int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2190,6 +2377,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
        struct drm_i915_gem_object *old_obj_priv = NULL;
        int i, ret, avail;
 
+       /* Just update our place in the LRU if our fence is getting used. */
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               return 0;
+       }
+
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
@@ -2211,7 +2404,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
        }
 
        /* First try to find a free reg */
-try_again:
        avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
@@ -2225,67 +2417,62 @@ try_again:
 
        /* None available, try to steal one or wait for a user to finish */
        if (i == dev_priv->num_fence_regs) {
-               uint32_t seqno = dev_priv->mm.next_gem_seqno;
-               loff_t offset;
+               struct drm_gem_object *old_obj = NULL;
 
                if (avail == 0)
                        return -ENOSPC;
 
-               for (i = dev_priv->fence_reg_start;
-                    i < dev_priv->num_fence_regs; i++) {
-                       uint32_t this_seqno;
-
-                       reg = &dev_priv->fence_regs[i];
-                       old_obj_priv = reg->obj->driver_private;
+               list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+                                   fence_list) {
+                       old_obj = old_obj_priv->obj;
 
                        if (old_obj_priv->pin_count)
                                continue;
 
+                       /* Take a reference, as otherwise the wait_rendering
+                        * below may cause the object to get freed out from
+                        * under us.
+                        */
+                       drm_gem_object_reference(old_obj);
+
                        /* i915 uses fences for GPU access to tiled buffers */
                        if (IS_I965G(dev) || !old_obj_priv->active)
                                break;
 
-                       /* find the seqno of the first available fence */
-                       this_seqno = old_obj_priv->last_rendering_seqno;
-                       if (this_seqno != 0 &&
-                           reg->obj->write_domain == 0 &&
-                           i915_seqno_passed(seqno, this_seqno))
-                               seqno = this_seqno;
-               }
-
-               /*
-                * Now things get ugly... we have to wait for one of the
-                * objects to finish before trying again.
-                */
-               if (i == dev_priv->num_fence_regs) {
-                       if (seqno == dev_priv->mm.next_gem_seqno) {
-                               i915_gem_flush(dev,
-                                              I915_GEM_GPU_DOMAINS,
-                                              I915_GEM_GPU_DOMAINS);
-                               seqno = i915_add_request(dev, NULL,
-                                                        I915_GEM_GPU_DOMAINS);
-                               if (seqno == 0)
-                                       return -ENOMEM;
+                       /* This brings the object to the head of the LRU if it
+                        * had been written to.  The only way this should
+                        * result in us waiting longer than the expected
+                        * optimal amount of time is if there was a
+                        * fence-using buffer later that was read-only.
+                        */
+                       i915_gem_object_flush_gpu_write_domain(old_obj);
+                       ret = i915_gem_object_wait_rendering(old_obj);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(old_obj);
+                               return ret;
                        }
 
-                       ret = i915_wait_request(dev, seqno);
-                       if (ret)
-                               return ret;
-                       goto try_again;
+                       break;
                }
 
                /*
                 * Zap this virtual mapping so we can set up a fence again
                 * for this object next time we need it.
                 */
-               offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
-               if (dev->dev_mapping)
-                       unmap_mapping_range(dev->dev_mapping, offset,
-                                           reg->obj->size, 1);
+               i915_gem_release_mmap(old_obj);
+
+               i = old_obj_priv->fence_reg;
+               reg = &dev_priv->fence_regs[i];
+
                old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+               list_del_init(&old_obj_priv->fence_list);
+
+               drm_gem_object_unreference(old_obj);
        }
 
        obj_priv->fence_reg = i;
+       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
        reg->obj = obj;
 
        if (IS_I965G(dev))
@@ -2295,6 +2482,8 @@ try_again:
        else
                i830_write_fence_reg(reg);
 
+       trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
        return 0;
 }
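/*
 * Aside (not part of this patch): fence stealing is now driven by
 * dev_priv->mm.fence_list in LRU order rather than by scanning the
 * register file for the lowest seqno.  The victim object is referenced
 * across the wait so it cannot be freed under us, its GTT mapping is
 * revoked via i915_gem_release_mmap() so any userspace access refaults,
 * and the register then moves to the new object, which joins the tail
 * of the LRU.
 */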
 
@@ -2328,26 +2517,70 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
        dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       list_del_init(&obj_priv->fence_list);
 }
 
 /**
- * Finds free space in the GTT aperture and binds the object there.
+ * i915_gem_object_put_fence_reg - waits on outstanding fenced access
+ * to the buffer to finish, and then resets the fence register.
+ * @obj: tiled object holding a fence register.
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
  */
-static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+int
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-       struct drm_mm_node *free_space;
-       int page_count, ret;
 
-       if (dev_priv->mm.suspended)
-               return -EBUSY;
-       if (alignment == 0)
-               alignment = i915_gem_get_gtt_alignment(obj);
-       if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
-               DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+       if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+               return 0;
+
+       /* On the i915, GPU access to tiled buffers is via a fence,
+        * therefore we must wait for any outstanding access to complete
+        * before clearing the fence.
+        */
+       if (!IS_I965G(dev)) {
+               int ret;
+
+               i915_gem_object_flush_gpu_write_domain(obj);
+               i915_gem_object_flush_gtt_write_domain(obj);
+               ret = i915_gem_object_wait_rendering(obj);
+               if (ret != 0)
+                       return ret;
+       }
+
+       i915_gem_clear_fence_reg(obj);
+
+       return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_mm_node *free_space;
+       bool retry_alloc = false;
+       int ret;
+
+       if (dev_priv->mm.suspended)
+               return -EBUSY;
+
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to bind a purgeable object\n");
+               return -EINVAL;
+       }
+
+       if (alignment == 0)
+               alignment = i915_gem_get_gtt_alignment(obj);
+       if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+               DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }
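/*
 * Aside (not part of this patch): the mask test above assumes the GTT
 * alignment is a power of two, so "alignment & (gtt_alignment - 1)"
 * isolates any misaligned low bits.  With a 4096-byte requirement,
 * 8192 & 4095 == 0 is accepted while 8200 & 4095 == 8 is rejected.
 */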
 
@@ -2363,58 +2596,73 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                }
        }
        if (obj_priv->gtt_space == NULL) {
-               bool lists_empty;
-
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
 #if WATCH_LRU
                DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-               spin_lock(&dev_priv->mm.active_list_lock);
-               lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                              list_empty(&dev_priv->mm.flushing_list) &&
-                              list_empty(&dev_priv->mm.active_list));
-               spin_unlock(&dev_priv->mm.active_list_lock);
-               if (lists_empty) {
-                       DRM_ERROR("GTT full, but LRU list empty\n");
-                       return -ENOSPC;
-               }
-
-               ret = i915_gem_evict_something(dev);
-               if (ret != 0) {
-                       if (ret != -ERESTARTSYS)
-                               DRM_ERROR("Failed to evict a buffer %d\n", ret);
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
                        return ret;
-               }
+
                goto search_free;
        }
 
 #if WATCH_BUF
-       DRM_INFO("Binding object of size %d at 0x%08x\n",
+       DRM_INFO("Binding object of size %zd at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
 #endif
+       if (retry_alloc) {
+               i915_gem_object_set_page_gfp_mask(obj,
+                                                 i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
+       }
        ret = i915_gem_object_get_pages(obj);
+       if (retry_alloc) {
+               i915_gem_object_set_page_gfp_mask(obj,
+                                                 i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
+       }
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
+
+               if (ret == -ENOMEM) {
+                       /* first try to clear up some space from the GTT */
+                       ret = i915_gem_evict_something(dev, obj->size);
+                       if (ret) {
+                               /* now try to shrink everyone else */
+                               if (!retry_alloc) {
+                                       retry_alloc = true;
+                                       goto search_free;
+                               }
+
+                               return ret;
+                       }
+
+                       goto search_free;
+               }
+
                return ret;
        }
 
-       page_count = obj->size / PAGE_SIZE;
        /* Create an AGP memory structure pointing at our pages, and bind it
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->pages,
-                                              page_count,
+                                              obj->size >> PAGE_SHIFT,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_put_pages(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
-               return -ENOMEM;
+
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
+                       return ret;
+
+               goto search_free;
        }
        atomic_inc(&dev->gtt_count);
        atomic_add(obj->size, &dev->gtt_memory);
@@ -2426,6 +2674,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+       trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
        return 0;
 }
 
@@ -2441,15 +2691,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
        if (obj_priv->pages == NULL)
                return;
 
-       /* XXX: The 865 in particular appears to be weird in how it handles
-        * cache flushing.  We haven't figured it out, but the
-        * clflush+agp_chipset_flush doesn't appear to successfully get the
-        * data visible to the PGU, while wbinvd + agp_chipset_flush does.
-        */
-       if (IS_I865G(obj->dev)) {
-               wbinvd();
-               return;
-       }
+       trace_i915_gem_object_clflush(obj);
 
        drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
@@ -2460,21 +2702,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        uint32_t seqno;
+       uint32_t old_write_domain;
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return;
 
        /* Queue the GPU write cache flushing we need. */
+       old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
        seqno = i915_add_request(dev, NULL, obj->write_domain);
        obj->write_domain = 0;
        i915_gem_object_move_to_active(obj, seqno);
+
+       trace_i915_gem_object_change_domain(obj,
+                                           obj->read_domains,
+                                           old_write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 {
+       uint32_t old_write_domain;
+
        if (obj->write_domain != I915_GEM_DOMAIN_GTT)
                return;
 
@@ -2482,7 +2732,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
         * to it immediately go to main memory as far as we know, so there's
         * no chipset flush.  It also doesn't land in render cache.
         */
+       old_write_domain = obj->write_domain;
        obj->write_domain = 0;
+
+       trace_i915_gem_object_change_domain(obj,
+                                           obj->read_domains,
+                                           old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
@@ -2490,13 +2745,19 @@ static void
 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       uint32_t old_write_domain;
 
        if (obj->write_domain != I915_GEM_DOMAIN_CPU)
                return;
 
        i915_gem_clflush_object(obj);
        drm_agp_chipset_flush(dev);
+       old_write_domain = obj->write_domain;
        obj->write_domain = 0;
+
+       trace_i915_gem_object_change_domain(obj,
+                                           obj->read_domains,
+                                           old_write_domain);
 }
 
 /**
@@ -2509,6 +2770,7 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       uint32_t old_write_domain, old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
@@ -2521,6 +2783,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        if (ret != 0)
                return ret;
 
+       old_write_domain = obj->write_domain;
+       old_read_domains = obj->read_domains;
+
        /* If we're writing through the GTT domain, then CPU and GPU caches
         * will need to be invalidated at next use.
         */
@@ -2539,6 +2804,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
                obj_priv->dirty = 1;
        }
 
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           old_write_domain);
+
        return 0;
 }
 
@@ -2551,6 +2820,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
+       uint32_t old_write_domain, old_read_domains;
        int ret;
 
        i915_gem_object_flush_gpu_write_domain(obj);
@@ -2566,6 +2836,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+       old_write_domain = obj->write_domain;
+       old_read_domains = obj->read_domains;
+
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj);
@@ -2586,6 +2859,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
 
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           old_write_domain);
+
        return 0;
 }
 
@@ -2707,10 +2984,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
        struct drm_i915_gem_object      *obj_priv = obj->driver_private;
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
+       uint32_t                        old_read_domains;
 
        BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
        BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
+       intel_mark_busy(dev, obj);
+
 #if WATCH_BUF
        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 __func__, obj,
@@ -2751,6 +3031,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
                i915_gem_clflush_object(obj);
        }
 
+       old_read_domains = obj->read_domains;
+
        /* The actual obj->write_domain will be updated with
         * pending_write_domain after we emit the accumulated flush for all
         * of our domain changes in execbuffers (which clears objects'
@@ -2769,6 +3051,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
                 obj->read_domains, obj->write_domain,
                 dev->invalidate_domains, dev->flush_domains);
 #endif
+
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           obj->write_domain);
 }
 
 /**
@@ -2800,8 +3086,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
        /* Free the page_cpu_valid mappings which are now stale, whether
         * or not we've got I915_GEM_DOMAIN_CPU.
         */
-       drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-                DRM_MEM_DRIVER);
+       kfree(obj_priv->page_cpu_valid);
        obj_priv->page_cpu_valid = NULL;
 }
 
@@ -2822,6 +3107,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                          uint64_t offset, uint64_t size)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       uint32_t old_read_domains;
        int i, ret;
 
        if (offset == 0 && size == obj->size)
@@ -2843,8 +3129,8 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         * newly adding I915_GEM_DOMAIN_CPU
         */
        if (obj_priv->page_cpu_valid == NULL) {
-               obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
-                                                     DRM_MEM_DRIVER);
+               obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
+                                                  GFP_KERNEL);
                if (obj_priv->page_cpu_valid == NULL)
                        return -ENOMEM;
        } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
@@ -2868,8 +3154,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
+       old_read_domains = obj->read_domains;
        obj->read_domains |= I915_GEM_DOMAIN_CPU;
 
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           obj->write_domain);
+
        return 0;
 }
 
@@ -2913,6 +3204,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                }
                target_obj_priv = target_obj->driver_private;
 
+#if WATCH_RELOC
+               DRM_INFO("%s: obj %p offset %08x target %d "
+                        "read %08x write %08x gtt %08x "
+                        "presumed %08x delta %08x\n",
+                        __func__,
+                        obj,
+                        (int) reloc->offset,
+                        (int) reloc->target_handle,
+                        (int) reloc->read_domains,
+                        (int) reloc->write_domain,
+                        (int) target_obj_priv->gtt_offset,
+                        (int) reloc->presumed_offset,
+                        reloc->delta);
+#endif
+
                /* The target buffer should have appeared before us in the
                 * exec_object list, so it should have a GTT space bound by now.
                 */
@@ -2924,25 +3230,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                        return -EINVAL;
                }
 
-               if (reloc->offset > obj->size - 4) {
-                       DRM_ERROR("Relocation beyond object bounds: "
-                                 "obj %p target %d offset %d size %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset, (int) obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
-               if (reloc->offset & 3) {
-                       DRM_ERROR("Relocation not 4-byte aligned: "
-                                 "obj %p target %d offset %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
-
+               /* Validate that the target is in a valid r/w GPU domain */
                if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
                    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
@@ -2956,7 +3244,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
-
                if (reloc->write_domain && target_obj->pending_write_domain &&
                    reloc->write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
@@ -2971,21 +3258,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                        return -EINVAL;
                }
 
-#if WATCH_RELOC
-               DRM_INFO("%s: obj %p offset %08x target %d "
-                        "read %08x write %08x gtt %08x "
-                        "presumed %08x delta %08x\n",
-                        __func__,
-                        obj,
-                        (int) reloc->offset,
-                        (int) reloc->target_handle,
-                        (int) reloc->read_domains,
-                        (int) reloc->write_domain,
-                        (int) target_obj_priv->gtt_offset,
-                        (int) reloc->presumed_offset,
-                        reloc->delta);
-#endif
-
                target_obj->pending_read_domains |= reloc->read_domains;
                target_obj->pending_write_domain |= reloc->write_domain;
 
@@ -2997,6 +3269,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                        continue;
                }
 
+               /* Check that the relocation address is valid... */
+               if (reloc->offset > obj->size - 4) {
+                       DRM_ERROR("Relocation beyond object bounds: "
+                                 "obj %p target %d offset %d size %d.\n",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset, (int) obj->size);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+               if (reloc->offset & 3) {
+                       DRM_ERROR("Relocation not 4-byte aligned: "
+                                 "obj %p target %d offset %d.\n",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
+               /* and points to somewhere within the target object. */
+               if (reloc->delta >= target_obj->size) {
+                       DRM_ERROR("Relocation beyond target object bounds: "
+                                 "obj %p target %d delta %d size %d.\n",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->delta, (int) target_obj->size);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
                ret = i915_gem_object_set_to_gtt_domain(obj, 1);
                if (ret != 0) {
                        drm_gem_object_unreference(target_obj);
@@ -3055,6 +3358,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;
 
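+       /* Record this submission in the trace stream before the batch is
+        * dispatched to the ring.
+        */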
+       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
        count = nbox ? nbox : 1;
 
        for (i = 0; i < count; i++) {
@@ -3267,8 +3572,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        }
 
        if (args->num_cliprects != 0) {
-               cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
-                                      DRM_MEM_DRIVER);
+               cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
+                                   GFP_KERNEL);
                if (cliprects == NULL)
                        goto pre_mutex_err;
 
@@ -3292,7 +3597,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EIO;
@@ -3350,8 +3655,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
                /* error other than GTT full, or we've already tried again */
                if (ret != -ENOSPC || pin_tries >= 1) {
-                       if (ret != -ERESTARTSYS)
-                               DRM_ERROR("Failed to pin buffers %d\n", ret);
+                       if (ret != -ERESTARTSYS) {
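+                               /* Dump GEM memory accounting to help diagnose the failure. */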
+                               unsigned long long total_size = 0;
+                               for (i = 0; i < args->buffer_count; i++)
+                                       total_size += object_list[i]->size;
+                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+                                         pinned + 1, args->buffer_count,
+                                         total_size, ret);
+                               DRM_ERROR("%d objects [%d pinned], "
+                                         "%d object bytes [%d pinned], "
+                                         "%d/%d gtt bytes\n",
+                                         atomic_read(&dev->object_count),
+                                         atomic_read(&dev->pin_count),
+                                         atomic_read(&dev->object_memory),
+                                         atomic_read(&dev->pin_memory),
+                                         atomic_read(&dev->gtt_memory),
+                                         dev->gtt_total);
+                       }
                        goto err;
                }
 
@@ -3362,7 +3682,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
                /* evict everyone we can from the aperture */
                ret = i915_gem_evict_everything(dev);
-               if (ret)
+               if (ret && ret != -ENOSPC)
                        goto err;
        }
 
@@ -3418,8 +3738,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
+               uint32_t old_write_domain = obj->write_domain;
 
                obj->write_domain = obj->pending_write_domain;
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   old_write_domain);
        }
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3521,8 +3845,7 @@ err:
 pre_mutex_err:
        drm_free_large(object_list);
        drm_free_large(exec_list);
-       drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
-                DRM_MEM_DRIVER);
+       kfree(cliprects);
 
        return ret;
 }
@@ -3537,20 +3860,15 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
-               if (ret != 0) {
-                       if (ret != -EBUSY && ret != -ERESTARTSYS)
-                               DRM_ERROR("Failure to bind: %d\n", ret);
+               if (ret)
                        return ret;
-               }
        }
        /*
         * Pre-965 chips need a fence register set up in order to
         * properly handle tiled surfaces.
         */
-       if (!IS_I965G(dev) &&
-           obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj, true);
+       if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
                        if (ret != -EBUSY && ret != -ERESTARTSYS)
                                DRM_ERROR("Failure to install fence: %d\n",
@@ -3623,6 +3941,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
        obj_priv = obj->driver_private;
 
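+       /* Pages of a purgeable object may be discarded at any time, so it
+        * must not be pinned into the GTT.
+        */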
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+               DRM_ERROR("Attempting to pin a purgeable buffer\n");
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
        if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -3735,11 +4060,61 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
     return i915_gem_ring_throttle(dev, file_priv);
 }
 
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_i915_gem_madvise *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       switch (args->madv) {
+       case I915_MADV_DONTNEED:
+       case I915_MADV_WILLNEED:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+                         args->handle);
+               return -EBADF;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       obj_priv = obj->driver_private;
+
+       if (obj_priv->pin_count) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+
+               DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+               return -EINVAL;
+       }
+
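+       /* Purged state is sticky: once the backing pages are gone the
+        * object cannot go back to WILLNEED.
+        */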
+       if (obj_priv->madv != __I915_MADV_PURGED)
+               obj_priv->madv = args->madv;
+
+       /* if the object is no longer bound, discard its backing storage */
+       if (i915_gem_object_is_purgeable(obj_priv) &&
+           obj_priv->gtt_space == NULL)
+               i915_gem_object_truncate(obj);
+
+       args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv;
 
-       obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+       obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
        if (obj_priv == NULL)
                return -ENOMEM;
 
@@ -3758,6 +4133,10 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+       INIT_LIST_HEAD(&obj_priv->fence_list);
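+       /* Fresh objects default to WILLNEED until userspace says otherwise. */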
+       obj_priv->madv = I915_MADV_WILLNEED;
+
+       trace_i915_gem_object_create(obj);
 
        return 0;
 }
@@ -3767,6 +4146,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+       trace_i915_gem_object_destroy(obj);
+
        while (obj_priv->pin_count > 0)
                i915_gem_object_unpin(obj);
 
@@ -3775,43 +4156,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
        i915_gem_object_unbind(obj);
 
-       i915_gem_free_mmap_offset(obj);
+       if (obj_priv->mmap_offset)
+               i915_gem_free_mmap_offset(obj);
 
-       drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+       kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
-       drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+       kfree(obj->driver_private);
 }
 
-/** Unbinds all objects that are on the given buffer list. */
+/** Unbinds all inactive objects. */
 static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
 {
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       while (!list_empty(head)) {
-               obj_priv = list_first_entry(head,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj = obj_priv->obj;
+       while (!list_empty(&dev_priv->mm.inactive_list)) {
+               struct drm_gem_object *obj;
+               int ret;
 
-               if (obj_priv->pin_count != 0) {
-                       DRM_ERROR("Pinned object in unbind list\n");
-                       mutex_unlock(&dev->struct_mutex);
-                       return -EINVAL;
-               }
+               obj = list_first_entry(&dev_priv->mm.inactive_list,
+                                      struct drm_i915_gem_object,
+                                      list)->obj;
 
                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
-                       DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
-                                 ret);
-                       mutex_unlock(&dev->struct_mutex);
+                       DRM_ERROR("Error unbinding object: %d\n", ret);
                        return ret;
                }
        }
 
-
        return 0;
 }
 
@@ -3833,6 +4206,7 @@ i915_gem_idle(struct drm_device *dev)
         * We need to replace this with a semaphore, or something.
         */
        dev_priv->mm.suspended = 1;
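+       /* No new batches can be queued while suspended, so stop the
+        * hangcheck timer as well.
+        */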
+       del_timer(&dev_priv->hangcheck_timer);
 
        /* Cancel the retire work handler, wait for it to finish if running
         */
@@ -3862,7 +4236,7 @@ i915_gem_idle(struct drm_device *dev)
                if (last_seqno == cur_seqno) {
                        if (stuck++ > 100) {
                                DRM_ERROR("hardware wedged\n");
-                               dev_priv->mm.wedged = 1;
+                               atomic_set(&dev_priv->mm.wedged, 1);
                                DRM_WAKEUP(&dev_priv->irq_queue);
                                break;
                        }
@@ -3875,7 +4249,7 @@ i915_gem_idle(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       if (!dev_priv->mm.wedged) {
+       if (!atomic_read(&dev_priv->mm.wedged)) {
                /* Active and flushing should now be empty as we've
                 * waited for a sequence higher than any pending execbuffer
                 */
@@ -3893,29 +4267,41 @@ i915_gem_idle(struct drm_device *dev)
         * the GPU domains and just stuff them onto inactive.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_gem_object *obj;
+               uint32_t old_write_domain;
 
-               obj_priv = list_first_entry(&dev_priv->mm.active_list,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-               i915_gem_object_move_to_inactive(obj_priv->obj);
+               obj = list_first_entry(&dev_priv->mm.active_list,
+                                      struct drm_i915_gem_object,
+                                      list)->obj;
+               old_write_domain = obj->write_domain;
+               obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+               i915_gem_object_move_to_inactive(obj);
+
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   old_write_domain);
        }
        spin_unlock(&dev_priv->mm.active_list_lock);
 
        while (!list_empty(&dev_priv->mm.flushing_list)) {
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_gem_object *obj;
+               uint32_t old_write_domain;
 
-               obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-               i915_gem_object_move_to_inactive(obj_priv->obj);
+               obj = list_first_entry(&dev_priv->mm.flushing_list,
+                                      struct drm_i915_gem_object,
+                                      list)->obj;
+               old_write_domain = obj->write_domain;
+               obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+               i915_gem_object_move_to_inactive(obj);
+
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   old_write_domain);
        }
 
 
        /* Move all inactive buffers out of the GTT. */
-       ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+       ret = i915_gem_evict_from_inactive_list(dev);
        WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
@@ -4031,7 +4417,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 
        /* Set up the kernel mapping for the ring. */
        ring->Size = obj->size;
-       ring->tail_mask = obj->size - 1;
 
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.size = obj->size;
@@ -4138,9 +4523,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               dev_priv->mm.wedged = 0;
+               atomic_set(&dev_priv->mm.wedged, 0);
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -4170,15 +4555,11 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       int ret;
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
-
-       return ret;
+       return i915_gem_idle(dev);
 }
 
 void
@@ -4197,6 +4578,7 @@ i915_gem_lastclose(struct drm_device *dev)
 void
 i915_gem_load(struct drm_device *dev)
 {
+       int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4204,10 +4586,15 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
 
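+       /* Make this device visible to the global GEM shrinker. */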
+       spin_lock(&shrink_list_lock);
+       list_add(&dev_priv->mm.shrink_list, &shrink_list);
+       spin_unlock(&shrink_list_lock);
+
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
@@ -4216,6 +4603,18 @@ i915_gem_load(struct drm_device *dev)
        else
                dev_priv->num_fence_regs = 8;
 
+       /* Initialize fence registers to zero */
+       if (IS_I965G(dev)) {
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+       } else {
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
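+               /* 945 and G33 parts carry eight additional fence registers. */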
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+       }
+
        i915_gem_detect_bit_6_swizzle(dev);
 }
 
@@ -4233,7 +4632,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;
 
-       phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;
 
@@ -4252,7 +4651,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 
        return 0;
 kfree_obj:
-       drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       kfree(phys_obj);
        return ret;
 }
 
@@ -4312,6 +4711,8 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        }
        drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
+
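+       /* Release the pages acquired earlier in this function for the copy-back. */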
+       i915_gem_object_put_pages(obj);
 out:
        obj_priv->phys_obj->cur_obj = NULL;
        obj_priv->phys_obj = NULL;
@@ -4369,6 +4770,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                kunmap_atomic(src, KM_USER0);
        }
 
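+       /* Matching put for the pages acquired for the copy above. */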
+       i915_gem_object_put_pages(obj);
+
        return 0;
 out:
        return ret;
@@ -4409,3 +4812,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
                list_del_init(i915_file_priv->mm.request_list.next);
        mutex_unlock(&dev->struct_mutex);
 }
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       drm_i915_private_t *dev_priv, *next_dev;
+       struct drm_i915_gem_object *obj_priv, *next_obj;
+       int cnt = 0;
+       int would_deadlock = 1;
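+       /* would_deadlock stays set until we trylock at least one device's
+        * struct_mutex; returning -1 then tells the VM we cannot shrink now.
+        */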
+
+       /* "fast-path" to count number of available objects */
+       if (nr_to_scan == 0) {
+               spin_lock(&shrink_list_lock);
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (mutex_trylock(&dev->struct_mutex)) {
+                               list_for_each_entry(obj_priv,
+                                                   &dev_priv->mm.inactive_list,
+                                                   list)
+                                       cnt++;
+                               mutex_unlock(&dev->struct_mutex);
+                       }
+               }
+               spin_unlock(&shrink_list_lock);
+
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       }
+
+       spin_lock(&shrink_list_lock);
+
+       /* first scan for clean buffers */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               i915_gem_retire_requests(dev);
+
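+               /* First pass: reap only buffers userspace marked purgeable. */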
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (i915_gem_object_is_purgeable(obj_priv)) {
+                               i915_gem_object_unbind(obj_priv->obj);
+                               if (--nr_to_scan <= 0)
+                                       break;
+                       }
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+
+               if (nr_to_scan <= 0)
+                       break;
+       }
+
+       /* second pass, evict/count anything still on the inactive list */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (nr_to_scan > 0) {
+                               i915_gem_object_unbind(obj_priv->obj);
+                               nr_to_scan--;
+                       } else {
+                               cnt++;
+                       }
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+       }
+
+       spin_unlock(&shrink_list_lock);
+
+       if (would_deadlock)
+               return -1;
+       else if (cnt > 0)
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       else
+               return 0;
+}
+
+static struct shrinker shrinker = {
+       .shrink = i915_gem_shrink,
+       .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+       register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+       unregister_shrinker(&shrinker);
+}