drm/vmwgfx: Support older hardware.
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

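/*
 * vmw_display_unit_cleanup - Free the resources held by a display unit.
 *
 * Drops any cursor surface or cursor buffer references and tears down
 * the CRTC, encoder and connector embedded in the display unit.
 */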
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

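/*
 * vmw_cursor_update_image - Define a new alpha cursor image.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command
 * followed by the raw image data (width * height 32-bit pixels) and
 * commits it to the device.
 *
 * Returns 0 on success, -EINVAL if no image is given or -ENOMEM if the
 * FIFO reservation fails.
 */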
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

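/*
 * vmw_cursor_update_position - Show, hide or move the hardware cursor.
 *
 * Writes the visibility flag and coordinates directly into the FIFO
 * cursor registers and bumps SVGA_FIFO_CURSOR_COUNT so the host picks
 * up the change.
 */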
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

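/*
 * vmw_du_crtc_cursor_set - crtc_funcs::cursor_set callback.
 *
 * Looks up the handle as either a surface (using its snooped image) or
 * a dma buffer (mapped and read directly), replaces the display unit's
 * current cursor with it and updates the cursor position. A zero
 * handle simply hides the cursor.
 */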
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				vmw_surface_unreference(&surface);
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}

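/*
 * vmw_du_crtc_cursor_move - crtc_funcs::cursor_move callback.
 *
 * Stores the new cursor position, offset by the CRTC origin, and
 * pushes it to the device, keeping the current visibility state.
 */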
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}

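/*
 * vmw_kms_cursor_snoop - Snoop cursor contents from a surface DMA.
 *
 * Called from the command parser when user space uploads to a cursor
 * surface. Copies the 64x64 32-bit image from the guest backing buffer
 * into the surface's snooper image and bumps its age so the cursor can
 * be refreshed after execbuf. Only full, page-aligned, 64x64 uploads
 * with a 256-byte pitch are handled; anything else is rejected.
 */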
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never be non-zero\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* We can't update the cursor image from here, since execbuf has
	 * already reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

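/*
 * vmw_kms_cursor_post_execbuf - Refresh cursors after command submission.
 *
 * Walks all CRTCs and re-sends the snooped cursor image for any display
 * unit whose cursor surface has aged since it was last uploaded.
 */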
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct delayed_work d_work;	/* deferred full-screen present */
	struct mutex work_lock;		/* protects present_fs and d_work */
	bool present_fs;		/* full-screen present pending? */
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfb =
		vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfb->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfb->surface);

	kfree(framebuffer);
}

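/*
 * vmw_framebuffer_present_fs_callback - Delayed full-screen present.
 *
 * Worker for the surface framebuffer's delayed work. If a full-screen
 * present is pending it emits an SVGA_3D_CMD_PRESENT covering the
 * whole framebuffer; if the FIFO reservation fails it leaves the
 * request pending and relies on the rescheduled work to retry.
 */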
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/* Will not re-add if already pending. */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}

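/*
 * vmw_framebuffer_surface_dirty - framebuffer_funcs::dirty for surfaces.
 *
 * On hardware without screen objects (or when called without clip
 * rects) the update is deferred to the full-screen present worker,
 * throttled to VMWGFX_PRESENT_RATE. Otherwise a present command with
 * one copy rect per clip is built and committed directly.
 */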
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/* No work was pending, so force an immediate present. */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) +
			       (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) +
				       num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) +
			(num_clips - 1) * sizeof(cmd->cr));

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

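/*
 * vmw_kms_new_framebuffer_surface - Wrap a surface in a DRM framebuffer.
 *
 * Takes a reference on the surface and initializes the framebuffer with
 * a fixed 32 bpp / depth 24 layout (see the XXX below), the delayed
 * present worker and its lock. pin and unpin are left NULL, presumably
 * because surface framebuffers are presented rather than scanned out of
 * VRAM.
 */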
int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
				    struct vmw_surface *surface,
				    struct vmw_framebuffer **out,
				    unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	int ret;

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = 32;
	vfbs->base.base.pitch = width * 32 / 8;
	vfbs->base.base.depth = 24;
	vfbs->base.base.width = width;
	vfbs->base.base.height = height;
	vfbs->base.pin = NULL;
	vfbs->base.unpin = NULL;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}

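/*
 * vmw_framebuffer_dmabuf_dirty - framebuffer_funcs::dirty for dma buffers.
 *
 * Emits one SVGA_CMD_UPDATE per clip rect (or one full-framebuffer
 * update when no clips are given) so the host refreshes the affected
 * screen regions.
 */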
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

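/*
 * vmw_framebuffer_dmabuf_pin/unpin - Scanout buffer placement hooks.
 *
 * Pinning moves the buffer to the start of VRAM, pausing overlays
 * around the move, presumably so that hardware without screen objects
 * can scan it out; unpinning moves it back out of VRAM.
 */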
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	vmw_overlay_resume_all(dev_priv);

	return ret;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (WARN_ON(!vfbd->buffer))
		return 0;

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}

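/*
 * vmw_kms_new_framebuffer_dmabuf - Wrap a dma buffer in a DRM framebuffer.
 *
 * Takes a reference on the buffer, fills in a fixed 32 bpp / depth 24
 * layout and hooks up the pin/unpin callbacks used for scanout.
 */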
int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *dmabuf,
				   struct vmw_framebuffer **out,
				   unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	int ret;

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the buffer info */
	vfbd->base.base.bits_per_pixel = 32;
	vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
	vfbd->base.base.depth = 24;
	vfbd->base.base.width = width;
	vfbd->base.base.height = height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic Kernel modesetting functions
 */

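/*
 * vmw_kms_fb_create - mode_config_funcs::fb_create callback.
 *
 * The user handle is first tried as a surface (which must be marked
 * for scanout) and then as a dma buffer, and the matching framebuffer
 * type is created around it.
 */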
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	if (!surface->scanout)
		goto err_not_scanout;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd->width, mode_cmd->height);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return NULL;
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd->width, mode_cmd->height);

	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}

	return &vfb->base;

err_not_scanout:
	DRM_ERROR("surface not marked as scanout\n");
	/* vmw_user_surface_lookup takes one ref */
	vmw_surface_unreference(&surface);

	return NULL;
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

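/*
 * vmw_kms_init - Set up kernel modesetting.
 *
 * Initializes the DRM mode config with the supported framebuffer size
 * limits and brings up the legacy display system.
 */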
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

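/*
 * vmw_kms_write_svga - Program a legacy SVGA display mode.
 *
 * Writes the pitch (via the pitchlock register or the FIFO pitchlock,
 * whichever the device supports), dimensions, bpp, depth and the fixed
 * xRGB channel masks. The save/restore helpers below preserve and
 * reinstate the corresponding VGA state around KMS use.
 */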
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock =
			ioread32(vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	return 0;
}