drm/nouveau: completely fail init if we fail to map the PRAMIN BAR
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index d1c7f8c..1fc57ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *pb = chan->pushbuf_bo;
        struct nouveau_gpuobj *pushbuf = NULL;
-       uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        int ret;
 
+       if (dev_priv->card_type >= NV_50) {
+               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+                                            dev_priv->vm_end, NV_DMA_ACCESS_RO,
+                                            NV_DMA_TARGET_AGP, &pushbuf);
+               chan->pushbuf_base = pb->bo.offset;
+       } else
        if (pb->bo.mem.mem_type == TTM_PL_TT) {
                ret = nouveau_gpuobj_gart_dma_new(chan, 0,
                                                  dev_priv->gart_info.aper_size,
                                                  NV_DMA_ACCESS_RO, &pushbuf,
                                                  NULL);
-               chan->pushbuf_base = start;
+               chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_VIDMEM, &pushbuf);
-               chan->pushbuf_base = start;
+               chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                                             dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI, &pushbuf);
-               chan->pushbuf_base = start;
+               chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        }
 
        ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
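
The two hunks above rework how pushbuf_base is chosen: on NV50+ the ctxdma
now covers the whole VM range and the base is the buffer's VM offset, while
pre-NV50 chips keep deriving it from the memory node's start. A minimal
sketch of the resulting selection ladder; the enum, function and parameter
names are illustrative rather than the driver's API, and NV_50, NV_04,
TTM_PL_TT and PAGE_SHIFT are assumed from the driver headers:

    #include <stdint.h>

    enum pb_target { PB_VM, PB_GART, PB_VRAM, PB_PCI };

    /* Pick the DMA target and pushbuf_base from card generation and
     * buffer placement, mirroring the ladder in the hunks above. */
    static enum pb_target
    pick_pushbuf_target(int card_type, int mem_type, uint64_t vm_offset,
                        uint64_t node_start_pages, uint64_t *base)
    {
            if (card_type >= NV_50) {
                    *base = vm_offset;      /* NV50+: base is the VM offset */
                    return PB_VM;
            }
            *base = node_start_pages << PAGE_SHIFT; /* node start, in bytes */
            if (mem_type == TTM_PL_TT)
                    return PB_GART;
            return (card_type != NV_04) ? PB_VRAM : PB_PCI;
    }
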
@@ -137,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                                           GFP_KERNEL);
        if (!dev_priv->fifos[channel])
                return -ENOMEM;
-       dev_priv->fifo_alloc_count++;
        chan = dev_priv->fifos[channel];
        INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
        INIT_LIST_HEAD(&chan->fence.pending);
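
With fifo_alloc_count dropped from both the alloc and free paths, anything
that wants a live-channel count has to derive it from the fifos[] table
itself. A hypothetical helper sketching that; NOUVEAU_MAX_CHANNEL_NR is
assumed from the driver headers of this era:

    /* Hypothetical: count live channels by scanning the channel table,
     * replacing the removed fifo_alloc_count counter. */
    static int
    nouveau_channel_count(struct drm_nouveau_private *dev_priv)
    {
            int i, count = 0;

            for (i = 0; i < NOUVEAU_MAX_CHANNEL_NR; i++)
                    if (dev_priv->fifos[i])
                            count++;
            return count;
    }
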
@@ -158,6 +162,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                return ret;
        }
 
+       nouveau_dma_pre_init(chan);
+
        /* Locate channel's user control regs */
        if (dev_priv->card_type < NV_40)
                user = NV03_USER(channel);
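
nouveau_dma_pre_init() is now called as soon as the channel's pushbuf
exists, before the user control regs are located. The diff doesn't show its
body; a sketch of what such a pre-init pass plausibly seeds, using the
chan->dma fields this diff references later (the exact arithmetic is an
assumption, not the commit's code):

    /* Assumed shape of the pre-init step: seed the CPU-side view of the
     * pushbuf ring before anything touches the channel's control regs. */
    static void
    dma_pre_init_sketch(struct nouveau_channel *chan, uint32_t pushbuf_words)
    {
            chan->dma.max  = pushbuf_words - 2; /* keep room for the wrap */
            chan->dma.put  = 0;
            chan->dma.cur  = chan->dma.put;
            chan->dma.free = chan->dma.max - chan->dma.cur;
    }
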
@@ -273,15 +279,23 @@ nouveau_channel_free(struct nouveau_channel *chan)
         */
        nouveau_fence_fini(chan);
 
-       /* Ensure the channel is no longer active on the GPU */
+       /* This will prevent pfifo from switching channels. */
        pfifo->reassign(dev, false);
 
-       if (pgraph->channel(dev) == chan) {
-               pgraph->fifo_access(dev, false);
+       /* We want to give pgraph a chance to idle and get rid of all potential
+        * errors. We need to do this before the lock, otherwise the irq handler
+        * is unable to process them.
+        */
+       if (pgraph->channel(dev) == chan)
+               nouveau_wait_for_idle(dev);
+
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
+       pgraph->fifo_access(dev, false);
+       if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);
-               pgraph->fifo_access(dev, true);
-       }
        pgraph->destroy_context(chan);
+       pgraph->fifo_access(dev, true);
 
        if (pfifo->channel_id(dev) == chan->id) {
                pfifo->disable(dev);
@@ -292,6 +306,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
 
        pfifo->reassign(dev, true);
 
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
        /* Release the channel's resources */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_bo) {
@@ -304,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan)
                iounmap(chan->user);
 
        dev_priv->fifos[chan->id] = NULL;
-       dev_priv->fifo_alloc_count--;
        kfree(chan);
 }
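
The channel-free hunks above reorder teardown: PGRAPH is idled first,
outside the lock, so the IRQ handler can still drain pending errors; only
then is context_switch_lock taken, and with the irqsave variant, because
the same lock is contended from interrupt context. The generic shape of
that pattern as a self-contained sketch (names are illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(state_lock); /* stands in for context_switch_lock */

    static void teardown_shared_state(void)
    {
            unsigned long flags;

            /* irqsave: this lock is also taken from the IRQ path, so a
             * plain spin_lock() here could deadlock against our own
             * interrupt handler. */
            spin_lock_irqsave(&state_lock, flags);
            /* ... unload/destroy the context while switches are held off ... */
            spin_unlock_irqrestore(&state_lock, flags);
    }
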
 
@@ -368,6 +383,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                return ret;
        init->channel  = chan->id;
 
+       if (chan->dma.ib_max)
+               init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+                                       NOUVEAU_GEM_DOMAIN_GART;
+       else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+               init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+       else
+               init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
        init->subchan[0].handle = NvM2MF;
        if (dev_priv->card_type < NV_50)
                init->subchan[0].grclass = 0x0039;
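
The new pushbuf_domains field tells userspace where the pushbuf may live:
channels with an indirect buffer (chan->dma.ib_max set) may use either
domain, otherwise the value mirrors where the buffer was actually placed.
Distilled into a hypothetical helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical distillation of the domain selection above. */
    static uint32_t
    pushbuf_domains(bool has_ib, bool pinned_in_vram)
    {
            if (has_ib)
                    return NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
            return pinned_in_vram ? NOUVEAU_GEM_DOMAIN_VRAM
                                  : NOUVEAU_GEM_DOMAIN_GART;
    }
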
@@ -407,7 +430,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -417,13 +439,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
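
With CARD_INIT, GEM_PIN/UNPIN and both GEM_PUSHBUF_CALL variants gone from
the table, those ioctl numbers now fail outright. A hedged userspace-side
sketch of how a caller might detect that and fall back to the remaining
DRM_NOUVEAU_GEM_PUSHBUF path; the helper and its fallback policy are
illustrative, and the exact errno (EINVAL vs ENOTTY) depends on the kernel:

    #include <errno.h>
    #include <xf86drm.h>

    /* Illustrative probe: a removed ioctl fails outright, so treat that
     * error as "use the remaining GEM_PUSHBUF path instead". */
    static int
    try_legacy_call(int fd, unsigned long cmd, void *req, unsigned long size)
    {
            int ret = drmCommandWrite(fd, cmd, req, size);
            if (ret == -EINVAL || ret == -ENOTTY)
                    return -1; /* ioctl gone: caller should fall back */
            return ret;
    }
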