drm: fix up mmap locking in preparation for ttm changes
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

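/**
 * Compute the page protection bits for an I/O mapping.
 *
 * \param map_type type of the map (_DRM_REGISTERS, _DRM_FRAME_BUFFER, ...).
 * \param vma virtual memory area.
 * \return page protection derived from the VMA flags, with per-architecture
 * cache attributes applied: uncached on x86 for non-AGP maps, uncached (and
 * guarded for registers) on powerpc, and write-combining on ia64 where the
 * EFI memory map allows it.
 */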
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	drm_hash_item_t *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
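	/*
	 * Consistent maps come from the kernel's linear mapping (they are
	 * freed with __drm_pci_free below), so virt_to_page() applies;
	 * _DRM_SHM maps are vmalloc'ed and need vmalloc_to_page() instead.
	 */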
	page = (map->type == _DRM_CONSISTENT) ?
		virt_to_page((void *)i) : vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only mapping that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
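				/* Rebuild a DMA handle on the stack so
				 * __drm_pci_free() can release the
				 * consistent memory.
				 */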
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
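	/* pagelist holds the kernel virtual address of each DMA buffer
	 * page, so translate back to a struct page with virt_to_page().
	 */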
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

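	/* The map can start anywhere inside the SG area, so combine the
	 * fault offset within the VMA with the map's offset from the start
	 * of the SG memory to index into the pagelist.
	 */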
	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}

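/*
 * Thin wrappers that adapt the helpers above to the vm_operations_struct
 * nopage signature; each reports a minor fault before delegating.
 */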
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist. The _locked variant must be called with
 * drm_device::struct_mutex held.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, the
 * file pointer, and calls drm_vm_open_locked(). The caller, drm_mmap_locked(),
 * already holds drm_device::struct_mutex.
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

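/*
 * Return the architecture-specific fixup added to a register map's offset
 * before remapping; on Alpha this converts a bus address into the
 * CPU-visible dense memory address for the hose.
 */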
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise looks up the map in
 * drm_device::map_hash, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open_locked(). The
 * caller must hold drm_device::struct_mutex.
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	unsigned long offset = 0;
	drm_hash_item_t *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU cannot access the AGP
			 * aperture's bus addresses directly, so for memory of
			 * type _DRM_AGP we sort out the real physical pages
			 * and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. It's only
		 * allocated in a different way. */
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

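/**
 * mmap entry point for DRM files.
 *
 * Takes drm_device::struct_mutex around drm_mmap_locked() so the map lookup
 * and the vma bookkeeping cannot race with map creation or teardown.
 */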
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
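
/*
 * Illustrative user-space usage (a minimal sketch, not part of this file's
 * build; the device path and length handling are assumptions -- real clients
 * obtain map offsets from the map list, e.g. through libdrm):
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	// pgoff 0 selects the DMA buffer area handled by drm_mmap_dma();
 *	// the length must cover the device's DMA buffers exactly
 *	void *buf = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// any other offset is looked up in dev->map_hash by drm_mmap_locked()
 */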