drm/i915: Add ioctl to set 'purgeability' of objects
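
Add an ioctl through which userspace can advise the kernel that a buffer
object's contents are disposable (I915_MADV_DONTNEED) and may be discarded
under memory pressure, or are needed again (I915_MADV_WILLNEED). Purgeable
objects can be neither pinned nor bound into the GTT, and a new shrinker
walks each device's inactive list to unbind and truncate them when the
system runs low on memory. The hardware-wedged flag becomes atomic so that
it can be set outside struct_mutex (e.g. from the hangcheck timer armed in
i915_add_request()).

A minimal userspace sketch of the new interface, using libdrm's drmIoctl().
It assumes the matching i915_drm.h additions from this series (struct
drm_i915_gem_madvise, the I915_MADV_* values and DRM_IOCTL_I915_GEM_MADVISE),
which are not part of this file's diff; fd is the open DRM device and
reupload() stands in for the application's own re-initialisation path:

	struct drm_i915_gem_madvise madv = {
		.handle = bo_handle,		/* GEM handle of an idle buffer */
		.madv = I915_MADV_DONTNEED,	/* contents may now be discarded */
	};
	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	/* ... later, before touching the buffer again ... */
	madv.madv = I915_MADV_WILLNEED;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
	if (!madv.retained)
		reupload(bo_handle);	/* backing store was purged */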
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 30ea4b6..2ab30f2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,9 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
 
+/* All open i915 devices, so the shrinker can walk them under memory pressure */
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
 {
@@ -1155,19 +1158,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
                }
+               list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
                ret = i915_gem_object_set_to_gtt_domain(obj, write);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
                }
-
-               list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        }
 
        /* Need a new fence register? */
@@ -1399,22 +1401,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        args->offset = obj_priv->mmap_offset;
 
-       obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
-       /* Make sure the alignment is correct for fence regs etc */
-       if (obj_priv->agp_mem &&
-           (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
-               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
@@ -1444,13 +1436,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
        if (obj_priv->tiling_mode != I915_TILING_NONE)
                i915_gem_object_save_bit_17_swizzle(obj);
 
-       for (i = 0; i < page_count; i++)
-               if (obj_priv->pages[i] != NULL) {
-                       if (obj_priv->dirty)
-                               set_page_dirty(obj_priv->pages[i]);
-                       mark_page_accessed(obj_priv->pages[i]);
-                       page_cache_release(obj_priv->pages[i]);
-               }
+       /* A purgeable object's contents may be discarded, so skip writeback */
+       if (obj_priv->madv == I915_MADV_DONTNEED)
+               obj_priv->dirty = 0;
+
+       for (i = 0; i < page_count; i++) {
+               if (obj_priv->pages[i] == NULL)
+                       break;
+
+               if (obj_priv->dirty)
+                       set_page_dirty(obj_priv->pages[i]);
+
+               if (obj_priv->madv == I915_MADV_WILLNEED)
+                       mark_page_accessed(obj_priv->pages[i]);
+
+               page_cache_release(obj_priv->pages[i]);
+       }
        obj_priv->dirty = 0;
 
        drm_free_large(obj_priv->pages);
@@ -1584,8 +1584,11 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 
        }
 
-       if (was_empty && !dev_priv->mm.suspended)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+       if (!dev_priv->mm.suspended) {
+               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               if (was_empty)
+                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+       }
        return seqno;
 }
 
@@ -1671,7 +1674,7 @@ out:
 /**
  * Returns true if seq1 is later than seq2.
  */
-static int
+bool
 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 {
        return (int32_t)(seq1 - seq2) >= 0;
@@ -1709,7 +1712,7 @@ i915_gem_retire_requests(struct drm_device *dev)
                retiring_seqno = request->seqno;
 
                if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   dev_priv->mm.wedged) {
+                   atomic_read(&dev_priv->mm.wedged)) {
                        i915_gem_retire_request(dev, request);
 
                        list_del(&request->list);
@@ -1751,6 +1754,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EIO;
+
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                if (IS_IGDNG(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1768,11 +1774,11 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
-                                              dev_priv->mm.wedged);
+                                              atomic_read(&dev_priv->mm.wedged));
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
-       if (dev_priv->mm.wedged)
+       if (atomic_read(&dev_priv->mm.wedged))
                ret = -EIO;
 
        if (ret && ret != -ERESTARTSYS)
@@ -2414,6 +2420,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
        if (dev_priv->mm.suspended)
                return -EBUSY;
+
+       if (obj_priv->madv == I915_MADV_DONTNEED) {
+               DRM_ERROR("Attempting to bind a purgeable object\n");
+               return -EINVAL;
+       }
+
        if (alignment == 0)
                alignment = i915_gem_get_gtt_alignment(obj);
        if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -3353,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EIO;
@@ -3681,6 +3693,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
        obj_priv = obj->driver_private;
 
+       if (obj_priv->madv == I915_MADV_DONTNEED) {
+               DRM_ERROR("Attempting to pin an I915_MADV_DONTNEED buffer\n");
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
        if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -3793,6 +3812,49 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
     return i915_gem_ring_throttle(dev, file_priv);
 }
 
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_i915_gem_madvise *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       switch (args->madv) {
+       case I915_MADV_DONTNEED:
+       case I915_MADV_WILLNEED:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+                         args->handle);
+               return -EBADF;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       obj_priv = obj->driver_private;
+
+       if (obj_priv->pin_count) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+
+               DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+               return -EINVAL;
+       }
+
+       obj_priv->madv = args->madv;
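+       /* report whether the object is still bound into the GTT and
+        * so still retains its backing store
+        */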
+       args->retained = obj_priv->gtt_space != NULL;
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv;
@@ -3817,6 +3879,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
        INIT_LIST_HEAD(&obj_priv->fence_list);
+       obj_priv->madv = I915_MADV_WILLNEED;
 
        return 0;
 }
@@ -3834,7 +3897,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
        i915_gem_object_unbind(obj);
 
-       i915_gem_free_mmap_offset(obj);
+       if (obj_priv->mmap_offset)
+               i915_gem_free_mmap_offset(obj);
 
        kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
@@ -3892,6 +3956,7 @@ i915_gem_idle(struct drm_device *dev)
         * We need to replace this with a semaphore, or something.
         */
        dev_priv->mm.suspended = 1;
+       del_timer(&dev_priv->hangcheck_timer);
 
        /* Cancel the retire work handler, wait for it to finish if running
         */
@@ -3921,7 +3986,7 @@ i915_gem_idle(struct drm_device *dev)
                if (last_seqno == cur_seqno) {
                        if (stuck++ > 100) {
                                DRM_ERROR("hardware wedged\n");
-                               dev_priv->mm.wedged = 1;
+                               atomic_set(&dev_priv->mm.wedged, 1);
                                DRM_WAKEUP(&dev_priv->irq_queue);
                                break;
                        }
@@ -3934,7 +3999,7 @@ i915_gem_idle(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       if (!dev_priv->mm.wedged) {
+       if (!atomic_read(&dev_priv->mm.wedged)) {
                /* Active and flushing should now be empty as we've
                 * waited for a sequence higher than any pending execbuffer
                 */
@@ -4196,9 +4261,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (dev_priv->mm.wedged) {
+       if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               dev_priv->mm.wedged = 0;
+               atomic_set(&dev_priv->mm.wedged, 0);
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -4268,6 +4333,10 @@ i915_gem_load(struct drm_device *dev)
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
 
+       spin_lock(&shrink_list_lock);
+       list_add(&dev_priv->mm.shrink_list, &shrink_list);
+       spin_unlock(&shrink_list_lock);
+
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
@@ -4485,3 +4554,140 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
                list_del_init(i915_file_priv->mm.request_list.next);
        mutex_unlock(&dev->struct_mutex);
 }
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+       struct inode *inode;
+
+       inode = obj->filp->f_path.dentry->d_inode;
+
+       mutex_lock(&inode->i_mutex);
+       truncate_inode_pages(inode->i_mapping, 0);
+       mutex_unlock(&inode->i_mutex);
+}
+
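+/* A buffer is deemed purgeable when its pages are clean or when userspace
+ * has marked it I915_MADV_DONTNEED.
+ */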
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+       return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
+}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       drm_i915_private_t *dev_priv, *next_dev;
+       struct drm_i915_gem_object *obj_priv, *next_obj;
+       int cnt = 0;
+       int would_deadlock = 1;
+
+       /* "fast-path" to count number of available objects */
+       if (nr_to_scan == 0) {
+               spin_lock(&shrink_list_lock);
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (mutex_trylock(&dev->struct_mutex)) {
+                               list_for_each_entry(obj_priv,
+                                                   &dev_priv->mm.inactive_list,
+                                                   list)
+                                       cnt++;
+                               mutex_unlock(&dev->struct_mutex);
+                       }
+               }
+               spin_unlock(&shrink_list_lock);
+
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       }
+
+       spin_lock(&shrink_list_lock);
+
+       /* first scan for clean buffers */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
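+               /* drop the device list lock while operating on this device
+                * under its struct_mutex; it is retaken before struct_mutex
+                * is released below
+                */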
+               spin_unlock(&shrink_list_lock);
+
+               i915_gem_retire_requests(dev);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (i915_gem_object_is_purgeable(obj_priv)) {
+                               struct drm_gem_object *obj = obj_priv->obj;
+                               i915_gem_object_unbind(obj);
+                               i915_gem_object_truncate(obj);
+
+                               if (--nr_to_scan <= 0)
+                                       break;
+                       }
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               if (nr_to_scan <= 0)
+                       break;
+       }
+
+       /* second pass, evict/count anything still on the inactive list */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (nr_to_scan > 0) {
+                               struct drm_gem_object *obj = obj_priv->obj;
+                               i915_gem_object_unbind(obj);
+                               if (i915_gem_object_is_purgeable(obj_priv))
+                                       i915_gem_object_truncate(obj);
+
+                               nr_to_scan--;
+                       } else
+                               cnt++;
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+       }
+
+       spin_unlock(&shrink_list_lock);
+
+       if (would_deadlock)
+               return -1;
+       else if (cnt > 0)
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       else
+               return 0;
+}
+
+static struct shrinker shrinker = {
+       .shrink = i915_gem_shrink,
+       .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+       register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+       unregister_shrinker(&shrinker);
+}
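
For reference, a minimal sketch of how the shrinker hooks above are expected
to be wired up from the driver's module init/exit path. The matching
i915_drv.c hunk is not part of this diff, so the call sites below are an
assumption; `driver` is the existing drm_driver instance:

	/* assumed i915_drv.c wiring, not shown in this diff */
	static int __init i915_init(void)
	{
		i915_gem_shrinker_init();
		return drm_init(&driver);
	}

	static void __exit i915_exit(void)
	{
		i915_gem_shrinker_exit();
		drm_exit(&driver);
	}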