drm/i915: introduce to_intel_bo helper
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e7472d8..bfbdad9 100644
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, DE_GSE);
        else
                i915_enable_pipestat(dev_priv, 1,
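
Note: the IS_IRONLAKE() checks in this file are converted to HAS_PCH_SPLIT()
so that the same IRQ paths cover Sandy Bridge as well as Ironlake. A minimal
sketch of the predicate, assuming the i915_drv.h definition of this era (the
header change is not part of this diff):

    /* Hypothetical sketch: any chipset with a PCH split shares the
     * Ironlake-style IRQ register layout.
     */
    #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
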
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
        drm_sysfs_hotplug_event(dev);
 }
 
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 busy_up, busy_down, max_avg, min_avg;
+       u16 rgvswctl;
+       u8 new_delay = dev_priv->cur_delay;
+
+       I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+       busy_up = I915_READ(RCPREVBSYTUPAVG);
+       busy_down = I915_READ(RCPREVBSYTDNAVG);
+       max_avg = I915_READ(RCBMAXAVG);
+       min_avg = I915_READ(RCBMINAVG);
+
+       /* Handle RCS change request from hw */
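+       /* The delay scale is inverted: max_delay is the numerically
+        * smallest (fastest) state and min_delay the largest (slowest),
+        * so "busier than max_avg" steps the delay value down.
+        */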
+       if (busy_up > max_avg) {
+               if (dev_priv->cur_delay != dev_priv->max_delay)
+                       new_delay = dev_priv->cur_delay - 1;
+               if (new_delay < dev_priv->max_delay)
+                       new_delay = dev_priv->max_delay;
+       } else if (busy_down < min_avg) {
+               if (dev_priv->cur_delay != dev_priv->min_delay)
+                       new_delay = dev_priv->cur_delay + 1;
+               if (new_delay > dev_priv->min_delay)
+                       new_delay = dev_priv->min_delay;
+       }
+
+       DRM_DEBUG("rps change requested: %d -> %d\n",
+                 dev_priv->cur_delay, new_delay);
+
+       rgvswctl = I915_READ(MEMSWCTL);
+       if (rgvswctl & MEMCTL_CMD_STS) {
+               DRM_ERROR("gpu busy, RCS change rejected\n");
+               return; /* still busy with another command */
+       }
+
+       /* Program the new state */
+       rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+               (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+       I915_WRITE(MEMSWCTL, rgvswctl);
+       POSTING_READ(MEMSWCTL);
+
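+       /* Assumption: setting the STS bit after the command word has been
+        * posted is what actually kicks off the frequency change.
+        */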
+       rgvswctl |= MEMCTL_CMD_STS;
+       I915_WRITE(MEMSWCTL, rgvswctl);
+
+       dev_priv->cur_delay = new_delay;
+
+       DRM_DEBUG("rps changed\n");
+}
+
 irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
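
The step-and-clamp logic in i915_handle_rps_change() is easier to follow with
concrete numbers. A standalone sketch with hypothetical delay bounds
(MAX_DELAY, MIN_DELAY and step_delay() are illustrative names, not driver
code; remember the scale is inverted, so MAX_DELAY < MIN_DELAY):

    #include <stdio.h>

    #define MAX_DELAY 3  /* fastest state, smallest value */
    #define MIN_DELAY 9  /* slowest state, largest value */

    /* Mirror of the handler's step-then-clamp logic. */
    static int step_delay(int cur, int busier)
    {
            int new_delay = cur;

            if (busier) {           /* busy_up > max_avg: speed up */
                    if (cur != MAX_DELAY)
                            new_delay = cur - 1;
                    if (new_delay < MAX_DELAY)
                            new_delay = MAX_DELAY;
            } else {                /* busy_down < min_avg: slow down */
                    if (cur != MIN_DELAY)
                            new_delay = cur + 1;
                    if (new_delay > MIN_DELAY)
                            new_delay = MIN_DELAY;
            }
            return new_delay;
    }

    int main(void)
    {
            printf("%d\n", step_delay(MAX_DELAY, 1)); /* already fastest: 3 */
            printf("%d\n", step_delay(5, 0));         /* one step slower: 6 */
            return 0;
    }
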
@@ -309,6 +360,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        if (de_iir & DE_GSE)
                ironlake_opregion_gse_intr(dev);
 
+       if (de_iir & DE_PLANEA_FLIP_DONE) {
+               intel_prepare_page_flip(dev, 0);
+               intel_finish_page_flip(dev, 0);
+       }
+
+       if (de_iir & DE_PLANEB_FLIP_DONE) {
+               intel_prepare_page_flip(dev, 1);
+               intel_finish_page_flip(dev, 1);
+       }
+
        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);
 
@@ -321,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }
 
+       if (de_iir & DE_PCU_EVENT) {
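+               /* Writing the read value back presumably acks the pending
+                * PCU event bits (write-one-to-clear) before we handle them.
+                */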
+               I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+               i915_handle_rps_change(dev);
+       }
+
        /* should clear PCH hotplug event before clearing CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
        I915_WRITE(GTIIR, gt_iir);
@@ -366,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
        }
 }
 
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+                        struct drm_gem_object *src)
+{
+       struct drm_i915_error_object *dst;
+       struct drm_i915_gem_object *src_priv;
+       int page, page_count;
+
+       if (src == NULL)
+               return NULL;
+
+       src_priv = to_intel_bo(src);
+       if (src_priv->pages == NULL)
+               return NULL;
+
+       page_count = src->size / PAGE_SIZE;
+
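+       /* This runs from the error interrupt handler, hence the GFP_ATOMIC
+        * allocations and kmap_atomic() mappings throughout.
+        */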
+       dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+       if (dst == NULL)
+               return NULL;
+
+       for (page = 0; page < page_count; page++) {
+               void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+               if (d == NULL)
+                       goto unwind;
+               s = kmap_atomic(src_priv->pages[page], KM_USER0);
+               memcpy(d, s, PAGE_SIZE);
+               kunmap_atomic(s, KM_USER0);
+               dst->pages[page] = d;
+       }
+       dst->page_count = page_count;
+       dst->gtt_offset = src_priv->gtt_offset;
+
+       return dst;
+
+unwind:
+       while (page--)
+               kfree(dst->pages[page]);
+       kfree(dst);
+       return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+       int page;
+
+       if (obj == NULL)
+               return;
+
+       for (page = 0; page < obj->page_count; page++)
+               kfree(obj->pages[page]);
+
+       kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+                     struct drm_i915_error_state *error)
+{
+       i915_error_object_free(error->batchbuffer[0]);
+       i915_error_object_free(error->batchbuffer[1]);
+       i915_error_object_free(error->ringbuffer);
+       kfree(error->active_bo);
+       kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+       u32 cmd;
+
+       if (IS_I830(dev) || IS_845G(dev))
+               cmd = MI_BATCH_BUFFER;
+       else if (IS_I965G(dev))
+               cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+                      MI_BATCH_NON_SECURE_I965);
+       else
+               cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
+       return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 head, bbaddr;
+       u32 *ring;
+
+       /* Locate the current position in the ringbuffer and walk back
+        * to find the most recently dispatched batch buffer.
+        */
+       bbaddr = 0;
+       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+       ring = (u32 *)(dev_priv->ring.virtual_start + head);
+
+       while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+               bbaddr = i915_get_bbaddr(dev, ring);
+               if (bbaddr)
+                       break;
+       }
+
+       if (bbaddr == 0) {
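+               /* Nothing between the start of the ring and HEAD; if the
+                * ring has wrapped, the last batch may sit beyond HEAD, so
+                * rescan from the very end of the ring.
+                */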
+               ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
+               while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+                       bbaddr = i915_get_bbaddr(dev, ring);
+                       if (bbaddr)
+                               break;
+               }
+       }
+
+       return bbaddr;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
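
The to_intel_bo() helper used in i915_error_object_create() above is the one
named in the commit title. Its definition lives in i915_drv.h and is not part
of this diff; a sketch, assuming it simply wraps the old driver_private cast:

    #define to_intel_bo(obj) \
            ((struct drm_i915_gem_object *)(obj)->driver_private)
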
@@ -378,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
 static void i915_capture_error_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
        struct drm_i915_error_state *error;
+       struct drm_gem_object *batchbuffer[2];
        unsigned long flags;
+       u32 bbaddr;
+       int count;
 
        spin_lock_irqsave(&dev_priv->error_lock, flags);
-       if (dev_priv->first_error)
-               goto out;
+       error = dev_priv->first_error;
+       spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+       if (error)
+               return;
 
        error = kmalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
-               DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
-               goto out;
+               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               return;
        }
 
+       error->seqno = i915_get_gem_seqno(dev);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->pipeastat = I915_READ(PIPEASTAT);
@@ -401,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
                error->ipehr = I915_READ(IPEHR);
                error->instdone = I915_READ(INSTDONE);
                error->acthd = I915_READ(ACTHD);
+               error->bbaddr = 0;
        } else {
                error->ipeir = I915_READ(IPEIR_I965);
                error->ipehr = I915_READ(IPEHR_I965);
@@ -408,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
                error->instps = I915_READ(INSTPS);
                error->instdone1 = I915_READ(INSTDONE1);
                error->acthd = I915_READ(ACTHD_I965);
+               error->bbaddr = I915_READ64(BB_ADDR);
        }
 
-       do_gettimeofday(&error->time);
+       bbaddr = i915_ringbuffer_last_batch(dev);
+
+       /* Grab the current batchbuffer, most likely to have crashed. */
+       batchbuffer[0] = NULL;
+       batchbuffer[1] = NULL;
+       count = 0;
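+       /* batchbuffer[0] is the object containing the last batch dispatched
+        * from the ring (bbaddr); batchbuffer[1] is the object the hardware
+        * was executing according to ACTHD, if that is a different object.
+        */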
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+
+               if (batchbuffer[0] == NULL &&
+                   bbaddr >= obj_priv->gtt_offset &&
+                   bbaddr < obj_priv->gtt_offset + obj->size)
+                       batchbuffer[0] = obj;
+
+               if (batchbuffer[1] == NULL &&
+                   error->acthd >= obj_priv->gtt_offset &&
+                   error->acthd < obj_priv->gtt_offset + obj->size &&
+                   batchbuffer[0] != obj)
+                       batchbuffer[1] = obj;
+
+               count++;
+       }
+
+       /* We need to copy these to an anonymous buffer as the simplest
+        * method to avoid being overwritten by userspace.
+        */
+       error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+       error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+       /* Record the ringbuffer */
+       error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+
+       /* Record buffers on the active list. */
+       error->active_bo = NULL;
+       error->active_bo_count = 0;
+
+       if (count)
+               error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+                                          GFP_ATOMIC);
+
+       if (error->active_bo) {
+               int i = 0;
+               list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+                       struct drm_gem_object *obj = obj_priv->obj;
+
+                       error->active_bo[i].size = obj->size;
+                       error->active_bo[i].name = obj->name;
+                       error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+                       error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+                       error->active_bo[i].read_domains = obj->read_domains;
+                       error->active_bo[i].write_domain = obj->write_domain;
+                       error->active_bo[i].fence_reg = obj_priv->fence_reg;
+                       error->active_bo[i].pinned = 0;
+                       if (obj_priv->pin_count > 0)
+                               error->active_bo[i].pinned = 1;
+                       if (obj_priv->user_pin_count > 0)
+                               error->active_bo[i].pinned = -1;
+                       error->active_bo[i].tiling = obj_priv->tiling_mode;
+                       error->active_bo[i].dirty = obj_priv->dirty;
+                       error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+                       if (++i == count)
+                               break;
+               }
+               error->active_bo_count = i;
+       }
 
-       dev_priv->first_error = error;
+       do_gettimeofday(&error->time);
 
-out:
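+       /* Re-check under the lock: another CPU may have captured an error
+        * state while we were building this one; if so, ours is freed below.
+        */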
+       spin_lock_irqsave(&dev_priv->error_lock, flags);
+       if (dev_priv->first_error == NULL) {
+               dev_priv->first_error = error;
+               error = NULL;
+       }
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+       if (error)
+               i915_error_state_free(dev, error);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+
+       spin_lock(&dev_priv->error_lock);
+       error = dev_priv->first_error;
+       dev_priv->first_error = NULL;
+       spin_unlock(&dev_priv->error_lock);
+
+       if (error)
+               i915_error_state_free(dev, error);
 }
 
 /**
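
i915_destroy_error_state() is the teardown counterpart to the capture path.
Its caller is not shown in this diff; a sketch of the expected use, assuming
it is invoked once during driver unload:

    /* Hypothetical call site, e.g. at the end of i915_driver_unload(): */
    i915_destroy_error_state(dev);
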
@@ -566,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
        atomic_inc(&dev_priv->irq_received);
 
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                return ironlake_irq_handler(dev);
 
        iir = I915_READ(IIR);
@@ -727,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
 
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-               if (IS_IRONLAKE(dev))
+               if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -743,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-               if (IS_IRONLAKE(dev))
+               if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -851,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
                return -EINVAL;
 
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
        else if (IS_I965G(dev))
@@ -873,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
                                             DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
        else
@@ -887,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!IS_IRONLAKE(dev))
+       if (!HAS_PCH_SPLIT(dev))
                opregion_enable_asle(dev);
        dev_priv->irq_enabled = 1;
 }
@@ -963,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t acthd;
-       
+
+       /* No reset support on this chip yet. */
+       if (IS_GEN6(dev))
+               return;
+
        if (!IS_I965G(dev))
                acthd = I915_READ(ACTHD);
        else
@@ -1023,13 +1303,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts that are always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-                          DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
        u32 render_mask = GT_USER_INTERRUPT;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
        dev_priv->irq_mask_reg = ~display_mask;
-       dev_priv->de_irq_enable_reg = display_mask;
+       dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
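+       /* Vblank bits stay masked in DEIMR here and are unmasked on demand
+        * by i915_enable_vblank(); only GSE, PCH and flip-done events are
+        * in the always-unmasked set now.
+        */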
 
        /* these should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
@@ -1054,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
        (void) I915_READ(SDEIER);
 
+       if (IS_IRONLAKE_M(dev)) {
+               /* Clear & enable PCU event interrupts */
+               I915_WRITE(DEIIR, DE_PCU_EVENT);
+               I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+               ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+       }
+
        return 0;
 }
 
@@ -1066,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 
-       if (IS_IRONLAKE(dev)) {
+       if (HAS_PCH_SPLIT(dev)) {
                ironlake_irq_preinstall(dev);
                return;
        }
@@ -1098,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                return ironlake_irq_postinstall(dev);
 
        /* Unmask the interrupts that we always want on. */
@@ -1186,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 
        dev_priv->vblank_pipe = 0;
 
-       if (IS_IRONLAKE(dev)) {
+       if (HAS_PCH_SPLIT(dev)) {
                ironlake_irq_uninstall(dev);
                return;
        }