drm/i915: Register a shrinker to free inactive lists under memory pressure
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c0ae6bb..2fff2e0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 #include <linux/swap.h>
 #include <linux/pci.h>
 
@@ -46,13 +47,15 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
-static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
 {
@@ -112,7 +115,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
-       int handle, ret;
+       int ret;
+       u32 handle;
 
        args->size = roundup(args->size, PAGE_SIZE);
 
@@ -979,8 +983,10 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;
@@ -1004,15 +1010,27 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
+       obj_priv = obj->driver_private;
 
        mutex_lock(&dev->struct_mutex);
+
+       intel_mark_busy(dev, obj);
+
 #if WATCH_BUF
-       DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+       DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
 #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+               /* Update the LRU on the fence for the CPU access that's
+                * about to occur.
+                */
+               if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                       list_move_tail(&obj_priv->fence_list,
+                                      &dev_priv->mm.fence_list);
+               }
+
                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
@@ -1051,7 +1069,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }
 
 #if WATCH_BUF
-       DRM_INFO("%s: sw_finish %d (%p %d)\n",
+       DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
 #endif
        obj_priv = obj->driver_private;
@@ -1140,25 +1158,23 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
                }
+               list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
                ret = i915_gem_object_set_to_gtt_domain(obj, write);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
                }
-
-               list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        }
 
        /* Need a new fence register? */
-       if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj, write);
+       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
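
From userspace this path is reached on the first touch of a GTT mmap: the fault binds the object into the aperture, flips it to the GTT domain, and now requests a fence register for any tiled buffer, letting the fence code handle the already-have-one case. A hypothetical libdrm-style sketch (fd, handle and size are assumed):

	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
	void *ptr;

	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, arg.offset);
	((volatile char *)ptr)[0] = 0;	/* first write faults into i915_gem_fault() */
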
@@ -1208,8 +1224,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 
        /* Set the object up for mmap'ing */
        list = &obj->map_list;
-       list->map = drm_calloc(1, sizeof(struct drm_map_list),
-                              DRM_MEM_DRIVER);
+       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;
 
@@ -1249,11 +1264,36 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 out_free_mm:
        drm_mm_put_block(list->file_offset_node);
 out_free_list:
-       drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+       kfree(list->map);
 
        return ret;
 }
 
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmap offset with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (dev->dev_mapping)
+               unmap_mapping_range(dev->dev_mapping,
+                                   obj_priv->mmap_offset, obj->size, 1);
+}
+
 static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
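
Both new callers of i915_gem_release_mmap() follow the same recipe: revoke the user's page-table entries first, then recycle the resource, and let i915_gem_fault() rebuild the mapping on the next access. Condensed from the two call sites added elsewhere in this patch:

	/* on unbind */
	i915_gem_release_mmap(obj);
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	/* on stealing a fence register */
	i915_gem_release_mmap(old_obj);
	old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&old_obj_priv->fence_list);
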
@@ -1271,7 +1311,7 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
        }
 
        if (list->map) {
-               drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+               kfree(list->map);
                list->map = NULL;
        }
 
@@ -1361,22 +1401,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        args->offset = obj_priv->mmap_offset;
 
-       obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
-       /* Make sure the alignment is correct for fence regs etc */
-       if (obj_priv->agp_mem &&
-           (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
@@ -1494,7 +1524,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (file_priv != NULL)
                i915_file_priv = file_priv->driver_priv;
 
-       request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
                return 0;
 
@@ -1546,8 +1576,11 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 
        }
 
-       if (was_empty && !dev_priv->mm.suspended)
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+       if (!dev_priv->mm.suspended) {
+               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               if (was_empty)
+                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+       }
        return seqno;
 }
 
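
Each request now re-arms the hangcheck timer, so the handler only fires when the GPU stops retiring work for a full period. The timer itself is set up elsewhere in this series; a hedged sketch of the assumed pattern, with i915_hangcheck_elapsed living in i915_irq.c:

	/* at driver load (assumed call site, not in this file's diff) */
	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long)dev);

	/* per request, as above: pushes the deadline out again */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + DRM_I915_HANGCHECK_PERIOD);
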
@@ -1633,7 +1666,7 @@ out:
 /**
  * Returns true if seq1 is later than seq2.
  */
-static int
+bool
 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 {
        return (int32_t)(seq1 - seq2) >= 0;
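
Exporting this helper makes the wraparound-safe comparison available outside this file. The signed-difference trick gives the right answer as long as the two sequence numbers are within 2^31 of each other:

	i915_seqno_passed(5, 3);                   /* (int32_t)2  >= 0 -> true  */
	i915_seqno_passed(3, 5);                   /* (int32_t)-2 >= 0 -> false */
	i915_seqno_passed(0x00000002, 0xfffffffe); /* diff wraps to 4  -> true  */
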
@@ -1671,12 +1704,12 @@ i915_gem_retire_requests(struct drm_device *dev)
                retiring_seqno = request->seqno;
 
                if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   dev_priv->mm.wedged) {
+                   atomic_read(&dev_priv->mm.wedged)) {
                        i915_gem_retire_request(dev, request);
 
                        list_del(&request->list);
                        list_del(&request->client_list);
-                       drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+                       kfree(request);
                } else
                        break;
        }
@@ -1696,7 +1729,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -1713,6 +1746,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EIO;
+
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                if (IS_IGDNG(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1730,11 +1766,11 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
-                                              dev_priv->mm.wedged);
+                                              atomic_read(&dev_priv->mm.wedged));
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
-       if (dev_priv->mm.wedged)
+       if (atomic_read(&dev_priv->mm.wedged))
                ret = -EIO;
 
        if (ret && ret != -ERESTARTSYS)
@@ -1863,7 +1899,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-       loff_t offset;
        int ret = 0;
 
 #if WATCH_BUF
@@ -1878,6 +1913,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                return -EINVAL;
        }
 
+       /* blow away mappings if mapped through GTT */
+       i915_gem_release_mmap(obj);
+
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+
        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
@@ -1891,22 +1932,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                return ret;
        }
 
+       BUG_ON(obj_priv->active);
+
        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }
 
-       BUG_ON(obj_priv->active);
-
-       /* blow away mappings if mapped through GTT */
-       offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
-       if (dev->dev_mapping)
-               unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
-
-       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-               i915_gem_clear_fence_reg(obj);
-
        i915_gem_object_put_pages(obj);
 
        if (obj_priv->gtt_space) {
@@ -2163,13 +2196,11 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        val |= I830_FENCE_REG_VALID;
 
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
-
 }
 
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
- * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -2180,8 +2211,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
+int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
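
With the write flag gone and the LRU handled internally, callers simply request a fence whenever a tiled object is about to be used through a GTT mapping; the fast path bumps the object in dev_priv->mm.fence_list and the slow path steals the least-recently-used register. The calling pattern, as it now appears in the fault handler and (with an extra !IS_I965G() check, since i965 needs no fences for GPU access) in i915_gem_object_pin():

	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			return ret;
	}
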
@@ -2190,6 +2221,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
        struct drm_i915_gem_object *old_obj_priv = NULL;
        int i, ret, avail;
 
+       /* Just update our place in the LRU if our fence is getting used. */
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               return 0;
+       }
+
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
@@ -2211,7 +2248,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
        }
 
        /* First try to find a free reg */
-try_again:
        avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
@@ -2225,67 +2261,62 @@ try_again:
 
        /* None available, try to steal one or wait for a user to finish */
        if (i == dev_priv->num_fence_regs) {
-               uint32_t seqno = dev_priv->mm.next_gem_seqno;
-               loff_t offset;
+               struct drm_gem_object *old_obj = NULL;
 
                if (avail == 0)
                        return -ENOSPC;
 
-               for (i = dev_priv->fence_reg_start;
-                    i < dev_priv->num_fence_regs; i++) {
-                       uint32_t this_seqno;
-
-                       reg = &dev_priv->fence_regs[i];
-                       old_obj_priv = reg->obj->driver_private;
+               list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+                                   fence_list) {
+                       old_obj = old_obj_priv->obj;
 
                        if (old_obj_priv->pin_count)
                                continue;
 
+                       /* Take a reference, as otherwise the wait_rendering
+                        * below may cause the object to get freed out from
+                        * under us.
+                        */
+                       drm_gem_object_reference(old_obj);
+
                        /* i915 uses fences for GPU access to tiled buffers */
                        if (IS_I965G(dev) || !old_obj_priv->active)
                                break;
 
-                       /* find the seqno of the first available fence */
-                       this_seqno = old_obj_priv->last_rendering_seqno;
-                       if (this_seqno != 0 &&
-                           reg->obj->write_domain == 0 &&
-                           i915_seqno_passed(seqno, this_seqno))
-                               seqno = this_seqno;
-               }
-
-               /*
-                * Now things get ugly... we have to wait for one of the
-                * objects to finish before trying again.
-                */
-               if (i == dev_priv->num_fence_regs) {
-                       if (seqno == dev_priv->mm.next_gem_seqno) {
-                               i915_gem_flush(dev,
-                                              I915_GEM_GPU_DOMAINS,
-                                              I915_GEM_GPU_DOMAINS);
-                               seqno = i915_add_request(dev, NULL,
-                                                        I915_GEM_GPU_DOMAINS);
-                               if (seqno == 0)
-                                       return -ENOMEM;
+                       /* This brings the object to the head of the LRU if it
+                        * had been written to.  The only way this should
+                        * result in us waiting longer than the expected
+                        * optimal amount of time is if there was a
+                        * fence-using buffer later that was read-only.
+                        */
+                       i915_gem_object_flush_gpu_write_domain(old_obj);
+                       ret = i915_gem_object_wait_rendering(old_obj);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(old_obj);
+                               return ret;
                        }
 
-                       ret = i915_wait_request(dev, seqno);
-                       if (ret)
-                               return ret;
-                       goto try_again;
+                       break;
                }
 
                /*
                 * Zap this virtual mapping so we can set up a fence again
                 * for this object next time we need it.
                 */
-               offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
-               if (dev->dev_mapping)
-                       unmap_mapping_range(dev->dev_mapping, offset,
-                                           reg->obj->size, 1);
+               i915_gem_release_mmap(old_obj);
+
+               i = old_obj_priv->fence_reg;
+               reg = &dev_priv->fence_regs[i];
+
                old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+               list_del_init(&old_obj_priv->fence_list);
+
+               drm_gem_object_unreference(old_obj);
        }
 
        obj_priv->fence_reg = i;
+       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
        reg->obj = obj;
 
        if (IS_I965G(dev))
@@ -2328,6 +2359,43 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
        dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       list_del_init(&obj_priv->fence_list);
+}
+
+/**
+ * i915_gem_object_put_fence_reg - waits on outstanding fenced access
+ * to the buffer to finish, and then resets the fence register.
+ * @obj: tiled object holding a fence register.
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+int
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+               return 0;
+
+       /* On the i915, GPU access to tiled buffers is via a fence,
+        * therefore we must wait for any outstanding access to complete
+        * before clearing the fence.
+        */
+       if (!IS_I965G(dev)) {
+               int ret;
+
+               i915_gem_object_flush_gpu_write_domain(obj);
+               i915_gem_object_flush_gtt_write_domain(obj);
+               ret = i915_gem_object_wait_rendering(obj);
+               if (ret != 0)
+                       return ret;
+       }
+
+       i915_gem_clear_fence_reg(obj);
+
+       return 0;
 }
 
 /**
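
The expected consumer of i915_gem_object_put_fence_reg() is a path that changes an object's tiling layout, which invalidates any fence programmed for the old layout. A hedged sketch of such a caller (the real one would live in i915_gem_tiling.c, outside this diff; args is assumed to be the set_tiling ioctl argument):

	ret = i915_gem_object_put_fence_reg(obj);
	if (ret != 0)
		return ret;
	obj_priv->tiling_mode = args->tiling_mode;
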
@@ -2391,7 +2459,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        }
 
 #if WATCH_BUF
-       DRM_INFO("Binding object of size %d at 0x%08x\n",
+       DRM_INFO("Binding object of size %zd at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
 #endif
        ret = i915_gem_object_get_pages(obj);
@@ -2441,16 +2509,6 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
        if (obj_priv->pages == NULL)
                return;
 
-       /* XXX: The 865 in particular appears to be weird in how it handles
-        * cache flushing.  We haven't figured it out, but the
-        * clflush+agp_chipset_flush doesn't appear to successfully get the
-        * data visible to the PGU, while wbinvd + agp_chipset_flush does.
-        */
-       if (IS_I865G(obj->dev)) {
-               wbinvd();
-               return;
-       }
-
        drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
@@ -2711,6 +2769,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
        BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
        BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
+       intel_mark_busy(dev, obj);
+
 #if WATCH_BUF
        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 __func__, obj,
@@ -2800,8 +2860,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
        /* Free the page_cpu_valid mappings which are now stale, whether
         * or not we've got I915_GEM_DOMAIN_CPU.
         */
-       drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-                DRM_MEM_DRIVER);
+       kfree(obj_priv->page_cpu_valid);
        obj_priv->page_cpu_valid = NULL;
 }
 
@@ -2843,8 +2902,8 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         * newly adding I915_GEM_DOMAIN_CPU
         */
        if (obj_priv->page_cpu_valid == NULL) {
-               obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
-                                                     DRM_MEM_DRIVER);
+               obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
+                                                  GFP_KERNEL);
                if (obj_priv->page_cpu_valid == NULL)
                        return -ENOMEM;
        } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
@@ -3267,8 +3326,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        }
 
        if (args->num_cliprects != 0) {
-               cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
-                                      DRM_MEM_DRIVER);
+               cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
+                                   GFP_KERNEL);
                if (cliprects == NULL)
                        goto pre_mutex_err;
 
@@ -3292,7 +3351,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EIO;
@@ -3521,8 +3580,7 @@ err:
 pre_mutex_err:
        drm_free_large(object_list);
        drm_free_large(exec_list);
-       drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
-                DRM_MEM_DRIVER);
+       kfree(cliprects);
 
        return ret;
 }
@@ -3547,10 +3605,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
         * Pre-965 chips need a fence register set up in order to
         * properly handle tiled surfaces.
         */
-       if (!IS_I965G(dev) &&
-           obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj, true);
+       if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
                        if (ret != -EBUSY && ret != -ERESTARTSYS)
                                DRM_ERROR("Failure to install fence: %d\n",
@@ -3739,7 +3795,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv;
 
-       obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+       obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
        if (obj_priv == NULL)
                return -ENOMEM;
 
@@ -3758,6 +3814,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+       INIT_LIST_HEAD(&obj_priv->fence_list);
 
        return 0;
 }
@@ -3775,11 +3832,12 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
        i915_gem_object_unbind(obj);
 
-       i915_gem_free_mmap_offset(obj);
+       if (obj_priv->mmap_offset)
+               i915_gem_free_mmap_offset(obj);
 
-       drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+       kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
-       drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+       kfree(obj->driver_private);
 }
 
 /** Unbinds all objects that are on the given buffer list. */
@@ -3833,6 +3891,7 @@ i915_gem_idle(struct drm_device *dev)
         * We need to replace this with a semaphore, or something.
         */
        dev_priv->mm.suspended = 1;
+       del_timer(&dev_priv->hangcheck_timer);
 
        /* Cancel the retire work handler, wait for it to finish if running
         */
@@ -3862,7 +3921,7 @@ i915_gem_idle(struct drm_device *dev)
                if (last_seqno == cur_seqno) {
                        if (stuck++ > 100) {
                                DRM_ERROR("hardware wedged\n");
-                               dev_priv->mm.wedged = 1;
+                               atomic_set(&dev_priv->mm.wedged, 1);
                                DRM_WAKEUP(&dev_priv->irq_queue);
                                break;
                        }
@@ -3875,7 +3934,7 @@ i915_gem_idle(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       if (!dev_priv->mm.wedged) {
+       if (!atomic_read(&dev_priv->mm.wedged)) {
                /* Active and flushing should now be empty as we've
                 * waited for a sequence higher than any pending execbuffer
                 */
@@ -4031,7 +4090,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 
        /* Set up the kernel mapping for the ring. */
        ring->Size = obj->size;
-       ring->tail_mask = obj->size - 1;
 
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.size = obj->size;
@@ -4138,9 +4196,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               dev_priv->mm.wedged = 0;
+               atomic_set(&dev_priv->mm.wedged, 0);
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -4197,6 +4255,7 @@ i915_gem_lastclose(struct drm_device *dev)
 void
 i915_gem_load(struct drm_device *dev)
 {
+       int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4204,10 +4263,15 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
 
+       spin_lock(&shrink_list_lock);
+       list_add(&dev_priv->mm.shrink_list, &shrink_list);
+       spin_unlock(&shrink_list_lock);
+
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
@@ -4216,6 +4280,18 @@ i915_gem_load(struct drm_device *dev)
        else
                dev_priv->num_fence_regs = 8;
 
+       /* Initialize fence registers to zero */
+       if (IS_I965G(dev)) {
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+       } else {
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+       }
+
        i915_gem_detect_bit_6_swizzle(dev);
 }
 
@@ -4233,7 +4309,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;
 
-       phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;
 
@@ -4252,7 +4328,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 
        return 0;
 kfree_obj:
-       drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       kfree(phys_obj);
        return ret;
 }
 
@@ -4312,6 +4388,8 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        }
        drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
+
+       i915_gem_object_put_pages(obj);
 out:
        obj_priv->phys_obj->cur_obj = NULL;
        obj_priv->phys_obj = NULL;
@@ -4369,6 +4447,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                kunmap_atomic(src, KM_USER0);
        }
 
+       i915_gem_object_put_pages(obj);
+
        return 0;
 out:
        return ret;
@@ -4409,3 +4489,140 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
                list_del_init(i915_file_priv->mm.request_list.next);
        mutex_unlock(&dev->struct_mutex);
 }
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+       struct inode *inode;
+
+       inode = obj->filp->f_path.dentry->d_inode;
+
+       mutex_lock(&inode->i_mutex);
+       truncate_inode_pages(inode->i_mapping, 0);
+       mutex_unlock(&inode->i_mutex);
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+       return !obj_priv->dirty;
+}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       drm_i915_private_t *dev_priv, *next_dev;
+       struct drm_i915_gem_object *obj_priv, *next_obj;
+       int cnt = 0;
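+       /* Cleared once some device's struct_mutex is taken below; if
+        * every trylock fails, the caller may itself hold struct_mutex,
+        * so return -1 to tell the VM to back off rather than deadlock.
+        */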
+       int would_deadlock = 1;
+
+       /* "fast-path" to count number of available objects */
+       if (nr_to_scan == 0) {
+               spin_lock(&shrink_list_lock);
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (mutex_trylock(&dev->struct_mutex)) {
+                               list_for_each_entry(obj_priv,
+                                                   &dev_priv->mm.inactive_list,
+                                                   list)
+                                       cnt++;
+                               mutex_unlock(&dev->struct_mutex);
+                       }
+               }
+               spin_unlock(&shrink_list_lock);
+
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       }
+
+       spin_lock(&shrink_list_lock);
+
+       /* first scan for clean buffers */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               i915_gem_retire_requests(dev);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (i915_gem_object_is_purgeable(obj_priv)) {
+                               struct drm_gem_object *obj = obj_priv->obj;
+                               i915_gem_object_unbind(obj);
+                               i915_gem_object_truncate(obj);
+
+                               if (--nr_to_scan <= 0)
+                                       break;
+                       }
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               if (nr_to_scan <= 0)
+                       break;
+       }
+
+       /* second pass, evict/count anything still on the inactive list */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (nr_to_scan > 0) {
+                               struct drm_gem_object *obj = obj_priv->obj;
+                               i915_gem_object_unbind(obj);
+                               if (i915_gem_object_is_purgeable(obj_priv))
+                                       i915_gem_object_truncate(obj);
+
+                               nr_to_scan--;
+                       } else
+                               cnt++;
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+       }
+
+       spin_unlock(&shrink_list_lock);
+
+       if (would_deadlock)
+               return -1;
+       else if (cnt > 0)
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       else
+               return 0;
+}
+
+static struct shrinker shrinker = {
+       .shrink = i915_gem_shrink,
+       .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+       register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+       unregister_shrinker(&shrinker);
+}
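
The register/unregister pair is presumably wired into the module's init and exit paths in i915_drv.c, which are not part of this file's diff; a hedged sketch with i915_init/i915_exit and the driver struct assumed:

	static int __init i915_init(void)
	{
		i915_gem_shrinker_init();
		return drm_init(&driver);
	}

	static void __exit i915_exit(void)
	{
		i915_gem_shrinker_exit();
		drm_exit(&driver);
	}

Under the old shrinker API used here, ->shrink is called first with nr_to_scan == 0 to ask how many objects are reclaimable, then with a positive nr_to_scan to actually reclaim; returning -1 reports that the locks could not be taken and the VM should retry later.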