drm/i915: Register a shrinker to free inactive lists under memory pressure
Author:     Chris Wilson <chris@chris-wilson.co.uk>
AuthorDate: Mon, 14 Sep 2009 15:50:28 +0000 (16:50 +0100)
Commit:     Jesse Barnes <jbarnes@virtuousgeek.org>
CommitDate: Thu, 17 Sep 2009 21:43:31 +0000 (14:43 -0700)
This should help GEM handle memory pressure situations more gracefully.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
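
For reference, this patch targets the shrinker interface as it existed in
this era (before struct shrink_control): the callback receives a nr_to_scan
budget and a gfp mask; a call with nr_to_scan == 0 is a query that must only
report how many objects could be freed; a return of -1 tells the VM that the
callback could not safely make progress (e.g. a lock it needs is already
held); and reported counts are conventionally scaled by
sysctl_vfs_cache_pressure. A minimal sketch of that contract, with a
hypothetical cache (example_lock, example_count(), example_evict_one())
standing in for GEM's object lists:

    /* Hypothetical cache primitives; only the shrinker plumbing is real. */
    static DEFINE_SPINLOCK(example_lock);
    static int example_count(void);     /* hypothetical: # reclaimable objects */
    static int example_evict_one(void); /* hypothetical: free one, 0 when empty */

    static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
            if (nr_to_scan == 0)
                    /* Query only: report the reclaimable population. */
                    return (example_count() / 100) * sysctl_vfs_cache_pressure;

            if (!spin_trylock(&example_lock))
                    return -1;      /* lock unavailable: report "would deadlock" */

            while (nr_to_scan-- > 0 && example_evict_one())
                    ;
            spin_unlock(&example_lock);

            return (example_count() / 100) * sysctl_vfs_cache_pressure;
    }

    static struct shrinker example_shrinker = {
            .shrink = example_shrink,
            .seeks  = DEFAULT_SEEKS, /* relative cost of recreating an entry */
    };

register_shrinker(&example_shrinker) at module init and
unregister_shrinker(&example_shrinker) at module exit complete the pairing,
exactly as i915_gem_shrinker_init()/i915_gem_shrinker_exit() do below.
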
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1f9e450..c57c174 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -362,6 +362,8 @@ static int __init i915_init(void)
 {
        driver.num_ioctls = i915_max_ioctl;
 
+       i915_gem_shrinker_init();
+
        /*
         * If CONFIG_DRM_I915_KMS is set, default to KMS unless
         * explicitly disabled with the module parameter.
@@ -388,6 +390,7 @@ static int __init i915_init(void)
 
 static void __exit i915_exit(void)
 {
+       i915_gem_shrinker_exit();
        drm_exit(&driver);
 }
 
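A note on ordering in the hunks above: the shrinker is registered once per
module, before drm_init() has probed any device, and unregistered before
drm_exit() tears the driver down. Individual devices only become visible to
the callback when i915_gem_load() adds them to shrink_list (see the
i915_gem.c hunk below), so in the window between registration and the first
device load the callback simply walks an empty list; presumably that is why
no synchronization with probe is needed beyond shrink_list_lock itself.
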
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0721469..bbcf5fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -369,6 +369,15 @@ typedef struct drm_i915_private {
                int gtt_mtrr;
 
                /**
+                * Membership on list of all loaded devices, used to evict
+                * inactive buffers under memory pressure.
+                *
+                * Modifications should only be done whilst holding the
+                * shrink_list_lock spinlock.
+                */
+               struct list_head shrink_list;
+
+               /**
                 * List of objects currently involved in rendering from the
                 * ringbuffer.
                 *
@@ -741,6 +750,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 
+void i915_gem_shrinker_init(void);
+void i915_gem_shrinker_exit(void);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
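
The locking rule stated in the new shrink_list comment shapes the whole
callback in i915_gem.c below: the global shrink_list_lock spinlock guards
only list membership; each device's struct_mutex is taken with
mutex_trylock() so a thread already holding it while allocating memory is
skipped rather than deadlocked against; and the spinlock is dropped before
any per-device work that can sleep. Condensed, the traversal pattern (names
as in the patch) is:

    spin_lock(&shrink_list_lock);
    list_for_each_entry_safe(dev_priv, next_dev, &shrink_list, mm.shrink_list) {
            struct drm_device *dev = dev_priv->dev;

            if (!mutex_trylock(&dev->struct_mutex))
                    continue;               /* busy device: skip, never block */

            spin_unlock(&shrink_list_lock); /* about to do work that may sleep */
            /* ... scan dev_priv->mm.inactive_list ... */
            spin_lock(&shrink_list_lock);   /* retake before advancing the walk */
            mutex_unlock(&dev->struct_mutex);
    }
    spin_unlock(&shrink_list_lock);
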
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 77cc6f5..2fff2e0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,9 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
 {
@@ -4265,6 +4268,10 @@ i915_gem_load(struct drm_device *dev)
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
 
+       spin_lock(&shrink_list_lock);
+       list_add(&dev_priv->mm.shrink_list, &shrink_list);
+       spin_unlock(&shrink_list_lock);
+
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
@@ -4482,3 +4489,140 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
                list_del_init(i915_file_priv->mm.request_list.next);
        mutex_unlock(&dev->struct_mutex);
 }
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+       struct inode *inode;
+
+       inode = obj->filp->f_path.dentry->d_inode;
+
+       mutex_lock(&inode->i_mutex);
+       truncate_inode_pages(inode->i_mapping, 0);
+       mutex_unlock(&inode->i_mutex);
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+       return !obj_priv->dirty;
+}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       drm_i915_private_t *dev_priv, *next_dev;
+       struct drm_i915_gem_object *obj_priv, *next_obj;
+       int cnt = 0;
+       int would_deadlock = 1;
+
+       /* "fast-path" to count number of available objects */
+       if (nr_to_scan == 0) {
+               spin_lock(&shrink_list_lock);
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (mutex_trylock(&dev->struct_mutex)) {
+                               list_for_each_entry(obj_priv,
+                                                   &dev_priv->mm.inactive_list,
+                                                   list)
+                                       cnt++;
+                               mutex_unlock(&dev->struct_mutex);
+                       }
+               }
+               spin_unlock(&shrink_list_lock);
+
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       }
+
+       spin_lock(&shrink_list_lock);
+
+       /* first scan for clean buffers */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               i915_gem_retire_requests(dev);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (i915_gem_object_is_purgeable(obj_priv)) {
+                               struct drm_gem_object *obj = obj_priv->obj;
+                               i915_gem_object_unbind(obj);
+                               i915_gem_object_truncate(obj);
+
+                               if (--nr_to_scan <= 0)
+                                       break;
+                       }
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               if (nr_to_scan <= 0)
+                       break;
+       }
+
+       /* second pass, evict/count anything still on the inactive list */
+       list_for_each_entry_safe(dev_priv, next_dev,
+                                &shrink_list, mm.shrink_list) {
+               struct drm_device *dev = dev_priv->dev;
+
+               if (!mutex_trylock(&dev->struct_mutex))
+                       continue;
+
+               spin_unlock(&shrink_list_lock);
+
+               list_for_each_entry_safe(obj_priv, next_obj,
+                                        &dev_priv->mm.inactive_list,
+                                        list) {
+                       if (nr_to_scan > 0) {
+                               struct drm_gem_object *obj = obj_priv->obj;
+                               i915_gem_object_unbind(obj);
+                               if (i915_gem_object_is_purgeable(obj_priv))
+                                       i915_gem_object_truncate(obj);
+
+                               nr_to_scan--;
+                       } else
+                               cnt++;
+               }
+
+               spin_lock(&shrink_list_lock);
+               mutex_unlock(&dev->struct_mutex);
+
+               would_deadlock = 0;
+       }
+
+       spin_unlock(&shrink_list_lock);
+
+       if (would_deadlock)
+               return -1;
+       else if (cnt > 0)
+               return (cnt / 100) * sysctl_vfs_cache_pressure;
+       else
+               return 0;
+}
+
+static struct shrinker shrinker = {
+       .shrink = i915_gem_shrink,
+       .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+       register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+       unregister_shrinker(&shrinker);
+}
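
Two behaviours of i915_gem_shrink() above are worth spelling out.
Purgeability is simply !obj_priv->dirty, so the first pass unbinds and
truncates only clean inactive buffers whose backing pages can be dropped
outright, while the second pass unbinds whatever is still on the inactive
list to release GTT space, truncating only those that test purgeable after
the unbind. The would_deadlock flag is cleared only once some device's
struct_mutex is successfully acquired in that second pass, so "every device
was busy" becomes the -1 return that tells the VM to back off. Finally, the
return value follows the same convention as the VFS caches: with, say, 300
countable objects and the default sysctl_vfs_cache_pressure of 100, the
callback reports (300 / 100) * 100 = 300, whereas vfs_cache_pressure = 50
would report 150 and make GEM buffers look half as attractive to reclaim.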