i915: Add support for MSI and interrupt mitigation.
drivers/gpu/drm/i915/i915_dma.c
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
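/* Illustrative note (not part of the original change): the loop below
 * approximates that resettable timer by resetting the iteration count
 * (i = 0) whenever the head pointer moves, so the 10000-iteration budget
 * only runs out if the ring makes no progress at all.  Free space is
 * computed as head - (tail + 8), wrapping by the ring size when negative.
 * With a hypothetical 128 KiB ring, head = 0x100 and tail = 0x1f00:
 *
 *     space = 0x100 - (0x1f00 + 8);        <- negative
 *     space += 0x20000;                    <- 0x1e1f8 bytes free
 */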
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        int i;

        for (i = 0; i < 10000; i++) {
                ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
                if (ring->space >= n)
                        return 0;

                dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

                if (ring->head != last_head)
                        i = 0;

                last_head = ring->head;
        }

        return -EBUSY;
}

void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

        ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->Size;

        if (ring->head == ring->tail)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        if (dev_priv->ring.virtual_start) {
                drm_core_ioremapfree(&dev_priv->ring.map, dev);
                dev_priv->ring.virtual_start = 0;
                dev_priv->ring.map.handle = 0;
                dev_priv->ring.map.size = 0;
        }

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
                /* Need to rewrite hardware status page */
                I915_WRITE(HWS_PGA, 0x1ffff000);
        }

        if (dev_priv->status_gfx_addr) {
                dev_priv->status_gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
                I915_WRITE(HWS_PGA, 0x1ffff000);
        }

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                i915_dma_cleanup(dev);
                return -EINVAL;
        }

        dev_priv->sarea_priv = (drm_i915_sarea_t *)
            ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

        dev_priv->ring.Start = init->ring_start;
        dev_priv->ring.End = init->ring_end;
        dev_priv->ring.Size = init->ring_size;
        dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

        dev_priv->ring.map.offset = init->ring_start;
        dev_priv->ring.map.size = init->ring_size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        drm_core_ioremap(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.handle == NULL) {
                i915_dma_cleanup(dev);
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        /* Program Hardware Status Page */
        if (!I915_NEED_GFX_HWS(dev)) {
                dev_priv->status_page_dmah =
                        drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

                if (!dev_priv->status_page_dmah) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Can not allocate hardware status page\n");
                        return -ENOMEM;
                }
                dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
                dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

                memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        }
        DRM_DEBUG("Enabled hardware status page\n");
        return 0;
}
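/* Clarifying note (inferred from the code above): chipsets that do not need a
 * status page in graphics memory (!I915_NEED_GFX_HWS) get one from
 * PCI-coherent memory here, and its bus address is written to HWS_PGA.
 * G33-class parts instead have userspace supply a graphics-memory offset
 * through the I915_HWS_ADDR ioctl; see i915_set_status_page() below.
 */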

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        DRM_DEBUG("%s\n", __func__);

        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                return -EINVAL;
        }

        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!dev_priv->hw_status_page) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

        if (dev_priv->status_gfx_addr != 0)
                I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}
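/* Worked example (illustrative only, with a hypothetical value): a 2D command
 * has opcode bits (cmd >> 29) == 0x2 and its low byte encodes the dword count
 * minus two, so cmd = 0x54000004 validates to a length of 0x04 + 2 = 6 dwords
 * and the checker resumes at the 7th dword.  A return value of 0 marks the
 * command illegal and aborts the rest of the buffer, as described above.
 */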

static int validate_cmd(int cmd)
{
        int ret = do_validate_cmd(cmd);

/*      printk("validate_cmd( %x ): %d\n", cmd, ret); */

        return ret;
}

static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;

        if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
                return -EINVAL;

        BEGIN_LP_RING((dwords+1)&~1);

        for (i = 0; i < dwords;) {
                int cmd, sz;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return -EINVAL;

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);

                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                                                         sizeof(cmd))) {
                                return -EINVAL;
                        }
                        OUT_RING(cmd);
                }
        }

        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

static int i915_emit_box(struct drm_device * dev,
                         struct drm_clip_rect __user * boxes,
                         int i, int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box;
        RING_LOCALS;

        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return -EFAULT;
        }

        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return -EINVAL;
        }

        if (IS_I965G(dev)) {
                BEGIN_LP_RING(4);
                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
}
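/* Note (derived from this file): the breadcrumb lands in dword 5 of the
 * hardware status page via MI_STORE_DWORD_INDEX, and is read back as
 * hw_status[5] in i915_batchbuffer()/i915_cmdbuffer() and via
 * READ_BREADCRUMB() in i915_getparam() to report the last completed
 * dispatch to userspace.
 */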

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t * cmd)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cmd->cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
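/* Note (based on the loop above): when cliprects are supplied, the whole
 * command buffer is replayed once per cliprect, with the matching drawing
 * rectangle emitted before each pass; with no cliprects it is emitted
 * exactly once.
 */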

static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, boxes, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        BEGIN_LP_RING(2);
                        if (IS_I965G(dev)) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                        ADVANCE_LP_RING();
                } else {
                        BEGIN_LP_RING(4);
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);

        return 0;
}
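/* Note (reading of the code above): 830/845G use the inline MI_BATCH_BUFFER
 * form, which spells out both the start and end address of the batch; newer
 * parts chain to the batch with MI_BATCH_BUFFER_START, carrying the
 * non-secure bit in the command dword on 965 and in the address dword on
 * the others.
 */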

static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __func__,
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        BEGIN_LP_RING(2);
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(6);
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(2);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);
        ADVANCE_LP_RING();

        dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();

        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}
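/* Note (summary of the sequence above): the flip is a flush, then a
 * CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP pointing the display at whichever of
 * back_offset/front_offset is not current, a wait for the plane A flip event,
 * and finally a breadcrumb store so progress stays visible in dword 5 of the
 * status page.
 */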

static int i915_quiescent(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_kernel_lost_context(dev);
        return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        LOCK_TEST_WITH_RETURN(dev, file_priv);

        return i915_quiescent(dev);
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 *hw_status = dev_priv->hw_status_page;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch->start, batch->used, batch->num_cliprects);

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
                                                       batch->num_cliprects *
                                                       sizeof(struct drm_clip_rect)))
                return -EFAULT;

        ret = i915_dispatch_batchbuffer(dev, batch);

        sarea_priv->last_dispatch = (int)hw_status[5];
        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 *hw_status = dev_priv->hw_status_page;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        int ret;

        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects &&
            DRM_VERIFYAREA_READ(cmdbuf->cliprects,
                                cmdbuf->num_cliprects *
                                sizeof(struct drm_clip_rect))) {
                DRM_ERROR("Fault accessing cliprects\n");
                return -EFAULT;
        }

        ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                return ret;
        }

        sarea_priv->last_dispatch = (int)hw_status[5];
        return 0;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        DRM_DEBUG("%s\n", __func__);

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        return i915_dispatch_flip(dev);
}

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->irq_enabled;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        default:
                DRM_ERROR("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param->value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
        default:
                DRM_ERROR("unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);

        dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
                dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
        dev_priv->hw_status_page = dev_priv->hws_map.handle;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
                        dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
}
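/* Note (assumption from the surrounding code): this ioctl serves G33-class
 * chipsets, whose hardware status page lives at an offset in graphics memory
 * rather than in system memory.  The offset is masked to a page-aligned value
 * for HWS_PGA, and the CPU mapping is made through the AGP aperture at
 * dev->agp->base + hws->addr.
 */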

int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long base, size;
        int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
        if (dev_priv == NULL)
                return -ENOMEM;

        memset(dev_priv, 0, sizeof(drm_i915_private_t));

        dev->dev_private = (void *)dev_priv;

        /* Add register map (needed for suspend/resume) */
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);

        ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
                         _DRM_KERNEL | _DRM_DRIVER,
                         &dev_priv->mmio_map);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);
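        /* Clarifying note (not in the original change description): the
         * return value of pci_enable_msi() is not checked here; if MSI
         * cannot be enabled the device simply keeps using its legacy
         * line-based interrupt, so this is a best-effort optimization.
         */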

        spin_lock_init(&dev_priv->user_irq_lock);

        return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        if (dev_priv->mmio_map)
                drm_rmmap(dev, dev_priv->mmio_map);

        drm_free(dev->dev_private, sizeof(drm_i915_private_t),
                 DRM_MEM_DRIVER);

        return 0;
}

void i915_driver_lastclose(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv)
                return;

        if (dev_priv->agp_heap)
                i915_mem_takedown(&(dev_priv->agp_heap));

        i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
        DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
        DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
        DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
        return 1;
}