drm: fix warning on 64-bit platforms
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_len);

static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
					     drm_local_map_t * map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
					drm_device_t * dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

	while (1) {
		drm_map_list_t *_entry;
		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif
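
/*
 * Worked example of the handle hashing above (the addresses are illustrative
 * assumptions, not from any particular machine): on a 64-bit kernel a
 * _DRM_SHM map's handle is a vmalloc() pointer such as 0xffff810012345000.
 * Its upper 32 bits are set, so HandleID() hands out a synthetic token
 * starting at START_RANGE:
 *
 *	HandleID(0xffff810012345000, dev) -> 0x10000000  (first such map)
 *	HandleID(0xffff81001234a000, dev) -> 0x10001000  (next map, advanced
 *							  by PAGE_SIZE on 4K
 *							  pages)
 *
 * A handle that already fits in 32 bits (e.g. a PCI BAR offset) is used
 * as-is, and the while (1) loop only steps past tokens already present on
 * dev->maplist.
 */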

/**
 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + map->size < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
233                 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
234                  * As we're limiting the address to 2^32-1 (or less),
235                  * casting it down to 32 bits is no problem, but we
236                  * need to point to a 64bit variable first. */
237                 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
238                 if (!dmah) {
239                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
240                         return -ENOMEM;
241                 }
242                 map->handle = dmah->vaddr;
243                 map->offset = (unsigned long)dmah->busaddr;
244                 kfree(dmah);
245                 break;
246         default:
247                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
248                 return -EINVAL;
249         }
250
251         list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
252         if (!list) {
253                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
254                 return -EINVAL;
255         }
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_sem protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	up(&dev->struct_sem);

	*maplist = list;
	return 0;
}

int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);
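
/*
 * A minimal usage sketch for the in-kernel interface above.  The BAR index
 * (0) and the mydrv_load() wrapper are hypothetical, for illustration only:
 *
 *	static int mydrv_load(drm_device_t *dev)
 *	{
 *		drm_local_map_t *mmio;
 *		int ret;
 *
 *		ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *				 drm_get_resource_len(dev, 0),
 *				 _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 *
 * Maps created this way are torn down with drm_rmmap()/drm_rmmap_locked()
 * below, or automatically on the last close of the device.
 */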

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);

	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* Avoid a warning on 64-bit: this cast isn't very nice, but the API
	 * is already set, so it's too late to change it.
	 */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}

EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}

EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order, DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
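	/* Worked example of the arithmetic above (assuming 4K pages, i.e.
	 * PAGE_SHIFT == 12): a request->size of 65536 gives order = 16 and
	 * size = 1 << 16 = 65536; page_order = 16 - 12 = 4, so each segment
	 * spans total = PAGE_SIZE << 4 = 65536 bytes, and alignment equals
	 * size whether or not _DRM_PAGE_ALIGN is set, since 65536 is already
	 * page-aligned.
	 */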

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		page = drm_alloc_pages(page_order, DRM_MEM_DMA);
		if (!page) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = page + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				up(&dev->struct_sem);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to drm_addbufs_agp(), drm_addbufs_sg(),
 * drm_addbufs_fb() or drm_addbufs_pci() for AGP, scatter-gather, framebuffer
 * or consistent PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
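
/*
 * For reference, a hedged sketch of how user space reaches the dispatch
 * above; library wrappers vary, so raw ioctl() is shown and the numbers are
 * illustrative assumptions:
 *
 *	drm_buf_desc_t req = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER,	-- AGP path; omit for PCI DMA
 *		.agp_start = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * On return, req.count and req.size hold what was actually allocated, which
 * is why the handler copies the request back out on success.
 */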

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing the allocation of more buffers after this call.
 * Information about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on the
 * addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
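
/*
 * A hedged sketch of the matching user-space side (libdrm's drmMapBufs()
 * does the equivalent); COUNT and the variable names are illustrative
 * assumptions:
 *
 *	drm_buf_map_t bm;
 *	drm_buf_pub_t list[COUNT];
 *
 *	bm.count = COUNT;
 *	bm.virtual = NULL;
 *	bm.list = list;
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *
 * After the call, each list[i].address points into the region this handler
 * mapped with do_mmap().
 */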

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}

EXPORT_SYMBOL(drm_order);
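
/*
 * drm_order() in action; the values follow directly from the loop above:
 *
 *	drm_order(1)    == 0	(2^0  == 1)
 *	drm_order(4096) == 12	(2^12 == 4096, an exact power of two)
 *	drm_order(4097) == 13	(rounded up to 2^13 == 8192)
 */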