/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>    /* For task queue support */
#include <linux/delay.h>

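/* gamma_dma_dispatch() performs the two-stage DMA kick-off: wait for two
 * input-FIFO slots, write the buffer address, wait for the command engine
 * to report ready (GCOMMANDSTATUS == 4), then write the transfer length
 * in 32-bit words.  A minimal caller sketch, assuming a buffer already
 * mapped through the logical page table built in gamma_do_init_dma()
 * (the shift by 12 selects the buffer's 4KB logical page, as in
 * gamma_do_dma() below):
 *
 *      drm_buf_t *buf = dma->next_buffer;
 *      unsigned long address = buf->idx << 12;
 *      gamma_dma_dispatch(dev, address, buf->used);
 */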
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
                                      unsigned long length)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        mb();
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();

        GAMMA_WRITE(GAMMA_DMAADDRESS, address);

        while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
                cpu_relax();

        GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

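/* Quiescence: let the DMA engine drain (DMACOUNT reaches zero), program
 * FilterMode so the sync tag can reach the output FIFO (the 1 << 10 bit
 * appears to enable sync-tag output), post a Sync command, and then pop
 * the output FIFO until GAMMA_SYNC_TAG emerges.  Everything written
 * before the tag is then known to have been processed.  The dual-MX
 * variant broadcasts to both rasterizers and drains each output FIFO in
 * turn (the second MX's registers sit at a 0x10000 offset).
 */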
void gamma_dma_quiescent_single(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        while (GAMMA_READ(GAMMA_DMACOUNT))
                cpu_relax();

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        cpu_relax();
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

void gamma_dma_quiescent_dual(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        while (GAMMA_READ(GAMMA_DMACOUNT))
                cpu_relax();

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        /* Read from first MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        cpu_relax();
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

        /* Read from second MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
                        cpu_relax();
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

void gamma_dma_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        while (GAMMA_READ(GAMMA_DMACOUNT))
                cpu_relax();
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        return (!GAMMA_READ(GAMMA_DMACOUNT));
}

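/* Top half: bump the _DRM_STAT_IRQ counter, acknowledge and re-arm the
 * chip (delay timer, command and general interrupt flags), and, if the
 * DMA engine has gone idle, free the buffer whose transfer just finished
 * and schedule deferred work to dispatch the next one.  Bit 0 of
 * dev->dma_flag serializes the buffer-freeing here against
 * gamma_do_dma().
 */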
irqreturn_t gamma_driver_irq_handler(DRM_IRQ_ARGS)
{
        drm_device_t     *dev = (drm_device_t *)arg;
        drm_device_dma_t *dma = dev->dma;
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;

        /* FIXME: should check whether we're actually interested in the interrupt? */
        atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05s */
        GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
        GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
        if (gamma_dma_is_ready(dev)) {
                /* Free previous buffer */
                if (test_and_set_bit(0, &dev->dma_flag))
                        return IRQ_HANDLED;
                if (dma->this_buffer) {
                        gamma_free_buffer(dev, dma->this_buffer);
                        dma->this_buffer = NULL;
                }
                clear_bit(0, &dev->dma_flag);

                /* Dispatch new buffer */
                schedule_work(&dev->work);
        }
        return IRQ_HANDLED;
}

/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
        unsigned long    address;
        unsigned long    length;
        drm_buf_t        *buf;
        int              retcode = 0;
        drm_device_dma_t *dma = dev->dma;

        if (test_and_set_bit(0, &dev->dma_flag))
                return -EBUSY;

        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        buf     = dma->next_buffer;
        /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
        /* So we pass the buffer index value into the physical page offset */
        address = buf->idx << 12;
        length  = buf->used;

        DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
                  buf->context, buf->idx, length);

        if (buf->list == DRM_LIST_RECLAIM) {
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        if (!length) {
                DRM_ERROR("0 length buffer\n");
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }

        if (!gamma_dma_is_ready(dev)) {
                clear_bit(0, &dev->dma_flag);
                return -EBUSY;
        }

        if (buf->while_locked) {
                if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                        DRM_ERROR("Dispatching buffer %d from pid %d"
                                  " \"while locked\", but no lock held\n",
                                  buf->idx, current->pid);
                }
        } else {
                if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
                                                DRM_KERNEL_CONTEXT)) {
                        clear_bit(0, &dev->dma_flag);
                        return -EBUSY;
                }
        }

        if (dev->last_context != buf->context
            && !(dev->queuelist[buf->context]->flags
                 & _DRM_CONTEXT_PRESERVED)) {
                /* PRE: dev->last_context != buf->context */
                if (DRM(context_switch)(dev, dev->last_context,
                                        buf->context)) {
                        DRM(clear_next_buffer)(dev);
                        DRM(free_buffer)(dev, buf);
                }
                retcode = -EBUSY;
                goto cleanup;

                /* POST: we will wait for the context
                   switch and will dispatch on a later call
                   when dev->last_context == buf->context.
                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                   TIME! */
        }

        gamma_clear_next_buffer(dev);
        buf->pending     = 1;
        buf->waiting     = 0;
        buf->list        = DRM_LIST_PEND;

        /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
        address = buf->idx << 12;

        gamma_dma_dispatch(dev, address, length);
        gamma_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
        atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

        if (!buf->while_locked && !dev->context_flag && !locked) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
cleanup:
        clear_bit(0, &dev->dma_flag);

        return retcode;
}

static void gamma_dma_timer_bh(unsigned long dev)
{
        gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_irq_immediate_bh(void *dev)
{
        gamma_dma_schedule(dev, 0);
}

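/* The scheduler is guarded by bit 0 of dev->interrupt_flag, so only one
 * instance runs at a time: a losing caller just bumps _DRM_STAT_MISSED
 * and returns, and the winner notices that counter changed and loops
 * again.  The "expire" counter (20) bounds how many passes the dispatch
 * loop makes before giving the rest of the system a turn.
 */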
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
        int              next;
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              retcode   = 0;
        int              processed = 0;
        int              missed;
        int              expire    = 20;
        drm_device_dma_t *dma      = dev->dma;

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                /* Not reentrant */
                atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
                return -EBUSY;
        }
        missed = atomic_read(&dev->counts[10]);

again:
        if (dev->context_flag) {
                clear_bit(0, &dev->interrupt_flag);
                return -EBUSY;
        }
        if (dma->next_buffer) {
                /* Unsent buffer that was previously
                   selected, but that couldn't be sent
                   because the lock could not be obtained
                   or the DMA engine wasn't ready.  Try
                   again. */
                if (!(retcode = gamma_do_dma(dev, locked)))
                        ++processed;
        } else {
                do {
                        next = gamma_select_queue(dev, gamma_dma_timer_bh);
                        if (next >= 0) {
                                q   = dev->queuelist[next];
                                buf = gamma_waitlist_get(&q->waitlist);
                                dma->next_buffer = buf;
                                dma->next_queue  = q;
                                if (buf && buf->list == DRM_LIST_RECLAIM) {
                                        gamma_clear_next_buffer(dev);
                                        gamma_free_buffer(dev, buf);
                                }
                        }
                } while (next >= 0 && !dma->next_buffer);
                if (dma->next_buffer) {
                        if (!(retcode = gamma_do_dma(dev, locked))) {
                                ++processed;
                        }
                }
        }

        if (--expire) {
                if (missed != atomic_read(&dev->counts[10])) {
                        if (gamma_dma_is_ready(dev))
                                goto again;
                }
                if (processed && gamma_dma_is_ready(dev)) {
                        processed = 0;
                        goto again;
                }
        }

        clear_bit(0, &dev->interrupt_flag);

        return retcode;
}

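/* Priority DMA bypasses the queues: the caller's index/size arrays are
 * copied in and each buffer is dispatched synchronously, with the
 * heavyweight lock held unless _DRM_DMA_WHILE_LOCKED was requested.  A
 * hedged userspace sketch (drm_dma_t fields and the ioctl as declared in
 * drm.h; error handling omitted):
 *
 *      drm_dma_t d = { 0 };
 *      d.context      = ctx;
 *      d.send_count   = n;
 *      d.send_indices = indices;
 *      d.send_sizes   = sizes;
 *      d.flags        = _DRM_DMA_PRIORITY | _DRM_DMA_BLOCK;
 *      ioctl(fd, DRM_IOCTL_DMA, &d);
 */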
static int gamma_dma_priority(struct file *filp,
                              drm_device_t *dev, drm_dma_t *d)
{
        unsigned long     address;
        unsigned long     length;
        int               must_free = 0;
        int               retcode   = 0;
        int               i;
        int               idx;
        drm_buf_t         *buf;
        drm_buf_t         *last_buf = NULL;
        drm_device_dma_t  *dma      = dev->dma;
        int               *send_indices = NULL;
        int               *send_sizes = NULL;

        DECLARE_WAITQUEUE(entry, current);

        /* Turn off interrupt handling */
        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                schedule();
                if (signal_pending(current))
                        return -EINTR;
        }
        if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
                while (!gamma_lock_take(&dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT)) {
                        schedule();
                        if (signal_pending(current)) {
                                clear_bit(0, &dev->interrupt_flag);
                                return -EINTR;
                        }
                }
                ++must_free;
        }

        send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
                                  DRM_MEM_DRIVER);
        if (send_indices == NULL) {
                retcode = -ENOMEM;
                goto cleanup;
        }
        if (copy_from_user(send_indices, d->send_indices,
                           d->send_count * sizeof(*send_indices))) {
                retcode = -EFAULT;
                goto cleanup;
        }

        send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
                                DRM_MEM_DRIVER);
        if (send_sizes == NULL) {
                retcode = -ENOMEM;
                goto cleanup;
        }
        if (copy_from_user(send_sizes, d->send_sizes,
                           d->send_count * sizeof(*send_sizes))) {
                retcode = -EFAULT;
                goto cleanup;
        }

        for (i = 0; i < d->send_count; i++) {
                idx = send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  send_indices[i], dma->buf_count - 1);
                        continue;
                }
                buf = dma->buflist[idx];
                if (buf->filp != filp) {
                        DRM_ERROR("Process %d using buffer not owned\n",
                                  current->pid);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->list != DRM_LIST_NONE) {
                        DRM_ERROR("Process %d using buffer on list %d\n",
                                  current->pid, buf->list);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                /* This isn't a race condition on
                   buf->list, since our concern is the
                   buffer reclaim during the time the
                   process closes the /dev/drm? handle, so
                   it can't also be doing DMA. */
                buf->list         = DRM_LIST_PRIO;
                buf->used         = send_sizes[i];
                buf->context      = d->context;
                buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
                address           = (unsigned long)buf->address;
                length            = buf->used;
                if (!length) {
                        DRM_ERROR("0 length buffer\n");
                }
                if (buf->pending) {
                        DRM_ERROR("Sending pending buffer:"
                                  " buffer %d, offset %d\n",
                                  send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->waiting) {
                        DRM_ERROR("Sending waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                buf->pending = 1;

                if (dev->last_context != buf->context
                    && !(dev->queuelist[buf->context]->flags
                         & _DRM_CONTEXT_PRESERVED)) {
                        add_wait_queue(&dev->context_wait, &entry);
                        current->state = TASK_INTERRUPTIBLE;
                        /* PRE: dev->last_context != buf->context */
                        DRM(context_switch)(dev, dev->last_context,
                                            buf->context);
                        /* POST: we will wait for the context
                           switch and will dispatch on a later call
                           when dev->last_context == buf->context.
                           NOTE WE HOLD THE LOCK THROUGHOUT THIS
                           TIME! */
                        schedule();
                        current->state = TASK_RUNNING;
                        remove_wait_queue(&dev->context_wait, &entry);
                        if (signal_pending(current)) {
                                retcode = -EINTR;
                                goto cleanup;
                        }
                        if (dev->last_context != buf->context) {
                                DRM_ERROR("Context mismatch: %d %d\n",
                                          dev->last_context,
                                          buf->context);
                        }
                }

                gamma_dma_dispatch(dev, address, length);
                atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
                atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

                if (last_buf) {
                        gamma_free_buffer(dev, last_buf);
                }
                last_buf = buf;
        }

cleanup:
        if (last_buf) {
                gamma_dma_ready(dev);
                gamma_free_buffer(dev, last_buf);
        }
        if (send_indices)
                DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
                          DRM_MEM_DRIVER);
        if (send_sizes)
                DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
                          DRM_MEM_DRIVER);

        if (must_free && !dev->context_flag) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
        clear_bit(0, &dev->interrupt_flag);
        return retcode;
}

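/* The non-priority send path: buffers are enqueued on the device queues
 * and the scheduler pushes them out.  With _DRM_DMA_BLOCK set, the
 * caller sleeps on the last buffer's wait queue until that buffer is
 * neither waiting nor pending, i.e. until everything it submitted has
 * been dispatched and completed.
 */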
static int gamma_dma_send_buffers(struct file *filp,
                                  drm_device_t *dev, drm_dma_t *d)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_buf_t         *last_buf = NULL;
        int               retcode   = 0;
        drm_device_dma_t  *dma      = dev->dma;
        int               send_index;

        if (get_user(send_index, &d->send_indices[d->send_count-1]))
                return -EFAULT;

        if (d->flags & _DRM_DMA_BLOCK) {
                last_buf = dma->buflist[send_index];
                add_wait_queue(&last_buf->dma_wait, &entry);
        }

        if ((retcode = gamma_dma_enqueue(filp, d))) {
                if (d->flags & _DRM_DMA_BLOCK)
                        remove_wait_queue(&last_buf->dma_wait, &entry);
                return retcode;
        }

        gamma_dma_schedule(dev, 0);

        if (d->flags & _DRM_DMA_BLOCK) {
                DRM_DEBUG("%d waiting\n", current->pid);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!last_buf->waiting && !last_buf->pending)
                                break; /* finished */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -EINTR; /* Can't restart */
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                DRM_DEBUG("%d running\n", current->pid);
                remove_wait_queue(&last_buf->dma_wait, &entry);
                if (!retcode
                    || (last_buf->list == DRM_LIST_PEND && !last_buf->pending)) {
                        if (!waitqueue_active(&last_buf->dma_wait)) {
                                gamma_free_buffer(dev, last_buf);
                        }
                }
                if (retcode) {
                        DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
                                  d->context,
                                  last_buf->waiting,
                                  last_buf->pending,
                                  (long)DRM_WAITCOUNT(dev, d->context),
                                  last_buf->idx,
                                  last_buf->list,
                                  current->pid);
                }
        }
        return retcode;
}

int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t        *priv     = filp->private_data;
        drm_device_t      *dev      = priv->dev;
        drm_device_dma_t  *dma      = dev->dma;
        int               retcode   = 0;
        drm_dma_t         __user *argp = (void __user *)arg;
        drm_dma_t         d;

        if (copy_from_user(&d, argp, sizeof(d)))
                return -EFAULT;

        if (d.send_count < 0 || d.send_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
                          current->pid, d.send_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          current->pid, d.request_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.send_count) {
                if (d.flags & _DRM_DMA_PRIORITY)
                        retcode = gamma_dma_priority(filp, dev, &d);
                else
                        retcode = gamma_dma_send_buffers(filp, dev, &d);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = gamma_dma_get_buffers(filp, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  current->pid, d.granted_count);
        if (copy_to_user(argp, &d, sizeof(d)))
                return -EFAULT;

        return retcode;
}

/* =============================================================
 * DMA initialization, cleanup
 */

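/* gamma_do_init_dma() builds a one-page DMA page table in the buffer at
 * index GLINT_DRI_BUF_COUNT: one word per DRI buffer, each holding that
 * buffer's page address with the low bits (0x07) set, which appear to be
 * the GLINT valid/enable flags for a page-table entry.  A sketch of the
 * layout this code implies (an assumption drawn from the code, not from
 * chip documentation):
 *
 *      pagetable[buf->idx] = page_address_of(buf) | 0x07;
 *
 * so a dispatch address of (idx << 12) selects buffer idx's logical page.
 */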
static int gamma_do_init_dma(drm_device_t *dev, drm_gamma_init_t *init)
{
        drm_gamma_private_t *dev_priv;
        drm_device_dma_t    *dma = dev->dma;
        drm_buf_t           *buf;
        int i;
        struct list_head    *list;
        unsigned long       *pgt;

        DRM_DEBUG("%s\n", __FUNCTION__);

        dev_priv = DRM(alloc)(sizeof(drm_gamma_private_t),
                              DRM_MEM_DRIVER);
        if (!dev_priv)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;

        memset(dev_priv, 0, sizeof(drm_gamma_private_t));

        dev_priv->num_rast = init->num_rast;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
                if (r_list->map &&
                    r_list->map->type == _DRM_SHM &&
                    r_list->map->flags & _DRM_CONTAINS_LOCK) {
                        dev_priv->sarea = r_list->map;
                        break;
                }
        }

        dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
        dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
        dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
        dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);

        dev_priv->sarea_priv = (drm_gamma_sarea_t *)
                ((u8 *)dev_priv->sarea->handle +
                 init->sarea_priv_offset);

        if (init->pcimode) {
                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;

                for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
                        buf = dma->buflist[i];
                        *pgt = virt_to_phys((void *)buf->address) | 0x07;
                        pgt++;
                }

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
        } else {
                dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
                drm_core_ioremap(dev->agp_buffer_map, dev);

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;

                for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
                        buf = dma->buflist[i];
                        *pgt = (unsigned long)buf->address + 0x07;
                        pgt++;
                }

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];

                while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
                        cpu_relax();
                GAMMA_WRITE(GAMMA_GDMACONTROL, 0xe);
        }
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();
        GAMMA_WRITE(GAMMA_PAGETABLEADDR, virt_to_phys((void *)buf->address));
        GAMMA_WRITE(GAMMA_PAGETABLELENGTH, 2);

        return 0;
}

int gamma_do_cleanup_dma(drm_device_t *dev)
{
        DRM_DEBUG("%s\n", __FUNCTION__);

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                if (dev->irq_enabled)
                        DRM(irq_uninstall)(dev);

        if (dev->dev_private) {
                if (dev->agp_buffer_map != NULL)
                        drm_core_ioremapfree(dev->agp_buffer_map, dev);

                DRM(free)(dev->dev_private, sizeof(drm_gamma_private_t),
                          DRM_MEM_DRIVER);
                dev->dev_private = NULL;
        }

        return 0;
}

int gamma_dma_init(struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_gamma_init_t init;

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (copy_from_user(&init, (drm_gamma_init_t __user *)arg, sizeof(init)))
                return -EFAULT;

        switch (init.func) {
        case GAMMA_INIT_DMA:
                return gamma_do_init_dma(dev, &init);
        case GAMMA_CLEANUP_DMA:
                return gamma_do_cleanup_dma(dev);
        }

        return -EINVAL;
}

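/* gamma_do_copy_dma() was meant to program a screen-to-screen copy by
 * feeding GLINT tag/data pairs through a restricted DMA buffer; the
 * command stream below is still disabled (#if 0) and incomplete -- note
 * that "buffer", the pointer the stream would be written through, is
 * never declared, and nothing dispatches the buffer afterwards.
 */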
static int gamma_do_copy_dma(drm_device_t *dev, drm_gamma_copy_t *copy)
{
        drm_device_dma_t    *dma = dev->dma;
        unsigned int        *screenbuf;

        DRM_DEBUG("%s\n", __FUNCTION__);

        /* We've DRM_RESTRICTED this DMA buffer */

        screenbuf = dma->buflist[GLINT_DRI_BUF_COUNT + 1]->address;

#if 0
        *buffer++ = 0x180;      /* Tag (FilterMode) */
        *buffer++ = 0x200;      /* Allow FBColor through */
        *buffer++ = 0x53B;      /* Tag */
        *buffer++ = copy->Pitch;
        *buffer++ = 0x53A;      /* Tag */
        *buffer++ = copy->SrcAddress;
        *buffer++ = 0x539;      /* Tag */
        *buffer++ = copy->WidthHeight; /* Initiates transfer */
        *buffer++ = 0x53C;      /* Tag - DMAOutputAddress */
        *buffer++ = virt_to_phys((void *)screenbuf);
        *buffer++ = 0x53D;      /* Tag - DMAOutputCount */
        *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/

        /* Data now sitting in dma->buflist[GLINT_DRI_BUF_COUNT + 1] */
        /* Now put it back to the screen */

        *buffer++ = 0x180;      /* Tag (FilterMode) */
        *buffer++ = 0x400;      /* Allow Sync through */
        *buffer++ = 0x538;      /* Tag - DMARectangleReadTarget */
        *buffer++ = 0x155;      /* FBSourceData | count */
        *buffer++ = 0x537;      /* Tag */
        *buffer++ = copy->Pitch;
        *buffer++ = 0x536;      /* Tag */
        *buffer++ = copy->DstAddress;
        *buffer++ = 0x535;      /* Tag */
        *buffer++ = copy->WidthHeight; /* Initiates transfer */
        *buffer++ = 0x530;      /* Tag - DMAAddr */
        *buffer++ = virt_to_phys((void *)screenbuf);
        *buffer++ = 0x531;
        *buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif

        /* need to dispatch it now */

        return 0;
}

int gamma_dma_copy(struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_gamma_copy_t copy;

        if (copy_from_user(&copy, (drm_gamma_copy_t __user *)arg, sizeof(copy)))
                return -EFAULT;

        return gamma_do_copy_dma(dev, &copy);
}

/* =============================================================
 * Per Context SAREA Support
 */

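/* Each context can have a private SAREA mapping: gamma_setsareactx binds
 * an existing map (looked up by its handle) to a context id, and
 * gamma_getsareactx returns that handle.  A hedged userspace sketch
 * (types and ioctls as declared in drm.h):
 *
 *      drm_ctx_priv_map_t m = { 0 };
 *      m.ctx_id = ctx;
 *      m.handle = sarea_handle;
 *      ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &m);
 */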
int gamma_getsareactx(struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_ctx_priv_map_t __user *argp = (void __user *)arg;
        drm_ctx_priv_map_t request;
        drm_map_t *map;

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        down(&dev->struct_sem);
        if ((int)request.ctx_id >= dev->max_context) {
                up(&dev->struct_sem);
                return -EINVAL;
        }

        map = dev->context_sareas[request.ctx_id];
        up(&dev->struct_sem);

        if (!map)
                return -EINVAL;

        request.handle = map->handle;
        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;
        return 0;
}

int gamma_setsareactx(struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_ctx_priv_map_t request;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        if (copy_from_user(&request,
                           (drm_ctx_priv_map_t __user *)arg,
                           sizeof(request)))
                return -EFAULT;

        down(&dev->struct_sem);
        r_list = NULL;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                if (r_list->map &&
                    r_list->map->handle == request.handle)
                        break;
        }
        if (list == &(dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        up(&dev->struct_sem);

        if (!map)
                return -EINVAL;

        down(&dev->struct_sem);
        if ((int)request.ctx_id >= dev->max_context) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        dev->context_sareas[request.ctx_id] = map;
        up(&dev->struct_sem);
        return 0;
}

void gamma_driver_irq_preinstall(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();

        GAMMA_WRITE(GAMMA_GCOMMANDMODE,         0x00000004);
        GAMMA_WRITE(GAMMA_GDMACONTROL,          0x00000000);
}

void gamma_driver_irq_postinstall(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE(GAMMA_GINTENABLE,           0x00002001);
        GAMMA_WRITE(GAMMA_COMMANDINTENABLE,     0x00000008);
        GAMMA_WRITE(GAMMA_GDELAYTIMER,          0x00039090);
}

void gamma_driver_irq_uninstall(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        if (!dev_priv)
                return;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE(GAMMA_GDELAYTIMER,          0x00000000);
        GAMMA_WRITE(GAMMA_COMMANDINTENABLE,     0x00000000);
        GAMMA_WRITE(GAMMA_GINTENABLE,           0x00000000);
}

extern drm_ioctl_desc_t DRM(ioctls)[];

static int gamma_driver_preinit(drm_device_t *dev)
{
        /* reset the finish ioctl */
        DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
        return 0;
}

static void gamma_driver_pretakedown(drm_device_t *dev)
{
        gamma_do_cleanup_dma(dev);
}

static void gamma_driver_dma_ready(drm_device_t *dev)
{
        gamma_dma_ready(dev);
}

static int gamma_driver_dma_quiescent(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        if (dev_priv->num_rast == 2)
                gamma_dma_quiescent_dual(dev);
        else
                gamma_dma_quiescent_single(dev);
        return 0;
}

void gamma_driver_register_fns(drm_device_t *dev)
{
        dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR |
            DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
        DRM(fops).read = gamma_fops_read;
        DRM(fops).poll = gamma_fops_poll;
        dev->driver.preinit = gamma_driver_preinit;
        dev->driver.pretakedown = gamma_driver_pretakedown;
        dev->driver.dma_ready = gamma_driver_dma_ready;
        dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
        dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
        dev->driver.dma_flush_unblock = gamma_flush_unblock;
}