1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define MAX_NOPID ((u32)~0)

/*
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

/** These are all of the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
				    I915_INTERRUPT_ENABLE_VAR)
55 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
57 if ((dev_priv->irq_mask_reg & mask) != 0) {
58 dev_priv->irq_mask_reg &= ~mask;
59 I915_WRITE(IMR, dev_priv->irq_mask_reg);
60 (void) I915_READ(IMR);
65 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
67 if ((dev_priv->irq_mask_reg & mask) != mask) {
68 dev_priv->irq_mask_reg |= mask;
69 I915_WRITE(IMR, dev_priv->irq_mask_reg);
70 (void) I915_READ(IMR);
75 i915_pipestat(int pipe)
85 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
87 if ((dev_priv->pipestat[pipe] & mask) != mask) {
88 u32 reg = i915_pipestat(pipe);
90 dev_priv->pipestat[pipe] |= mask;
91 /* Enable the interrupt, clear any pending status */
92 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
93 (void) I915_READ(reg);
98 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
100 if ((dev_priv->pipestat[pipe] & mask) != 0) {
101 u32 reg = i915_pipestat(pipe);
103 dev_priv->pipestat[pipe] &= ~mask;
104 I915_WRITE(reg, dev_priv->pipestat[pipe]);
105 (void) I915_READ(reg);
110 * i915_pipe_enabled - check if a pipe is enabled
112 * @pipe: pipe to check
114 * Reading certain registers when the pipe is disabled can hang the chip.
115 * Use this routine to make sure the PLL is running and the pipe is active
116 * before reading such registers if unsure.
119 i915_pipe_enabled(struct drm_device *dev, int pipe)
121 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
122 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
124 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
130 /* Called from drm generic code, passed a 'crtc', which
131 * we use as a pipe index
133 u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
135 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
136 unsigned long high_frame;
137 unsigned long low_frame;
138 u32 high1, high2, low, count;
140 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
141 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
143 if (!i915_pipe_enabled(dev, pipe)) {
144 DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
149 * High & low register fields aren't synchronized, so make sure
150 * we get a low value that's stable across two reads of the high
154 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
155 PIPE_FRAME_HIGH_SHIFT);
156 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
157 PIPE_FRAME_LOW_SHIFT);
158 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
159 PIPE_FRAME_HIGH_SHIFT);
160 } while (high1 != high2);
162 count = (high1 << 8) | low;
167 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
169 struct drm_device *dev = (struct drm_device *) arg;
170 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172 u32 pipea_stats = 0, pipeb_stats = 0;
174 unsigned long irqflags;
176 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
177 atomic_inc(&dev_priv->irq_received);
179 if (dev->pdev->msi_enabled)
181 iir = I915_READ(IIR);
184 if (dev->pdev->msi_enabled) {
185 I915_WRITE(IMR, dev_priv->irq_mask_reg);
186 (void) I915_READ(IMR);
188 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
193 * Clear the PIPE(A|B)STAT regs before the IIR
195 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
196 pipea_stats = I915_READ(PIPEASTAT);
197 I915_WRITE(PIPEASTAT, pipea_stats);
200 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
201 pipeb_stats = I915_READ(PIPEBSTAT);
202 I915_WRITE(PIPEBSTAT, pipeb_stats);
205 I915_WRITE(IIR, iir);
206 if (dev->pdev->msi_enabled)
207 I915_WRITE(IMR, dev_priv->irq_mask_reg);
208 (void) I915_READ(IIR); /* Flush posted writes */
210 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
212 if (dev_priv->sarea_priv)
213 dev_priv->sarea_priv->last_dispatch =
214 READ_BREADCRUMB(dev_priv);
216 if (iir & I915_USER_INTERRUPT) {
217 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
218 DRM_WAKEUP(&dev_priv->irq_queue);
221 if (pipea_stats & I915_VBLANK_INTERRUPT_STATUS) {
223 drm_handle_vblank(dev, 0);
226 if (pipeb_stats & I915_VBLANK_INTERRUPT_STATUS) {
228 drm_handle_vblank(dev, 1);
231 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
232 (iir & I915_ASLE_INTERRUPT))
233 opregion_asle_intr(dev);
238 static int i915_emit_irq(struct drm_device * dev)
240 drm_i915_private_t *dev_priv = dev->dev_private;
243 i915_kernel_lost_context(dev);
248 if (dev_priv->counter > 0x7FFFFFFFUL)
249 dev_priv->counter = 1;
250 if (dev_priv->sarea_priv)
251 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
254 OUT_RING(MI_STORE_DWORD_INDEX);
255 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
256 OUT_RING(dev_priv->counter);
257 OUT_RING(MI_USER_INTERRUPT);
260 return dev_priv->counter;
263 void i915_user_irq_get(struct drm_device *dev)
265 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
266 unsigned long irqflags;
268 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
269 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
270 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
271 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
274 void i915_user_irq_put(struct drm_device *dev)
276 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
277 unsigned long irqflags;
279 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
280 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
281 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
282 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
283 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
286 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
288 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
291 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
292 READ_BREADCRUMB(dev_priv));
294 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
295 if (dev_priv->sarea_priv) {
296 dev_priv->sarea_priv->last_dispatch =
297 READ_BREADCRUMB(dev_priv);
302 if (dev_priv->sarea_priv)
303 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
305 i915_user_irq_get(dev);
306 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
307 READ_BREADCRUMB(dev_priv) >= irq_nr);
308 i915_user_irq_put(dev);
311 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
312 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
315 if (dev_priv->sarea_priv)
316 dev_priv->sarea_priv->last_dispatch =
317 READ_BREADCRUMB(dev_priv);
322 /* Needs the lock as it touches the ring.
324 int i915_irq_emit(struct drm_device *dev, void *data,
325 struct drm_file *file_priv)
327 drm_i915_private_t *dev_priv = dev->dev_private;
328 drm_i915_irq_emit_t *emit = data;
331 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
334 DRM_ERROR("called with no initialization\n");
337 mutex_lock(&dev->struct_mutex);
338 result = i915_emit_irq(dev);
339 mutex_unlock(&dev->struct_mutex);
341 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
342 DRM_ERROR("copy_to_user\n");
349 /* Doesn't need the hardware lock.
351 int i915_irq_wait(struct drm_device *dev, void *data,
352 struct drm_file *file_priv)
354 drm_i915_private_t *dev_priv = dev->dev_private;
355 drm_i915_irq_wait_t *irqwait = data;
358 DRM_ERROR("called with no initialization\n");
362 return i915_wait_irq(dev, irqwait->irq_seq);
365 /* Called from drm generic code, passed 'crtc' which
366 * we use as a pipe index
368 int i915_enable_vblank(struct drm_device *dev, int pipe)
370 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
371 unsigned long irqflags;
373 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
375 i915_enable_pipestat(dev_priv, pipe,
376 PIPE_START_VBLANK_INTERRUPT_ENABLE);
378 i915_enable_pipestat(dev_priv, pipe,
379 PIPE_VBLANK_INTERRUPT_ENABLE);
380 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
384 /* Called from drm generic code, passed 'crtc' which
385 * we use as a pipe index
387 void i915_disable_vblank(struct drm_device *dev, int pipe)
389 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
390 unsigned long irqflags;
392 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
393 i915_disable_pipestat(dev_priv, pipe,
394 PIPE_VBLANK_INTERRUPT_ENABLE |
395 PIPE_START_VBLANK_INTERRUPT_ENABLE);
396 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
399 /* Set the vblank monitor pipe
401 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
402 struct drm_file *file_priv)
404 drm_i915_private_t *dev_priv = dev->dev_private;
407 DRM_ERROR("called with no initialization\n");
414 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
415 struct drm_file *file_priv)
417 drm_i915_private_t *dev_priv = dev->dev_private;
418 drm_i915_vblank_pipe_t *pipe = data;
421 DRM_ERROR("called with no initialization\n");
425 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
431 * Schedule buffer swap at given vertical blank.
433 int i915_vblank_swap(struct drm_device *dev, void *data,
434 struct drm_file *file_priv)
436 /* The delayed swap mechanism was fundamentally racy, and has been
437 * removed. The model was that the client requested a delayed flip/swap
438 * from the kernel, then waited for vblank before continuing to perform
439 * rendering. The problem was that the kernel might wake the client
440 * up before it dispatched the vblank swap (since the lock has to be
441 * held while touching the ringbuffer), in which case the client would
442 * clear and start the next frame before the swap occurred, and
443 * flicker would occur in addition to likely missing the vblank.
445 * In the absence of this ioctl, userland falls back to a correct path
446 * of waiting for a vblank, then dispatching the swap on its own.
447 * Context switching to userland and back is plenty fast enough for
448 * meeting the requirements of vblank swapping.
455 void i915_driver_irq_preinstall(struct drm_device * dev)
457 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
459 I915_WRITE(HWSTAM, 0xeffe);
460 I915_WRITE(PIPEASTAT, 0);
461 I915_WRITE(PIPEBSTAT, 0);
462 I915_WRITE(IMR, 0xffffffff);
463 I915_WRITE(IER, 0x0);
464 (void) I915_READ(IER);
467 int i915_driver_irq_postinstall(struct drm_device *dev)
469 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
470 int ret, num_pipes = 2;
472 ret = drm_vblank_init(dev, num_pipes);
476 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
478 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
480 /* Unmask the interrupts that we always want on. */
481 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
483 dev_priv->pipestat[0] = 0;
484 dev_priv->pipestat[1] = 0;
486 /* Disable pipe interrupt enables, clear pending pipe status */
487 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
488 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
489 /* Clear pending interrupt status */
490 I915_WRITE(IIR, I915_READ(IIR));
492 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
493 I915_WRITE(IMR, dev_priv->irq_mask_reg);
494 (void) I915_READ(IER);
496 opregion_enable_asle(dev);
497 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
502 void i915_driver_irq_uninstall(struct drm_device * dev)
504 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
509 dev_priv->vblank_pipe = 0;
511 I915_WRITE(HWSTAM, 0xffffffff);
512 I915_WRITE(PIPEASTAT, 0);
513 I915_WRITE(PIPEBSTAT, 0);
514 I915_WRITE(IMR, 0xffffffff);
515 I915_WRITE(IER, 0x0);
517 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
518 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
519 I915_WRITE(IIR, I915_READ(IIR));