drm/nv50: fix iommu errors caused by device reading from address 0
drivers/gpu/drm/nouveau/nouveau_fence.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

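/*
 * Fences mark points in a channel's command stream.  Each fence carries a
 * sequence number; once the channel has acknowledged that sequence the
 * fence is considered signalled.  On NV10 and later the acknowledged
 * sequence is read back from the channel itself (USE_REFCNT below); older
 * cards use the value recorded by the last fence interrupt.
 */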
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"

#define USE_REFCNT (dev_priv->card_type >= NV_10)

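/*
 * A fence lives on its channel's pending list from emit until the channel
 * acknowledges its sequence number, and is freed when the last kref is
 * dropped.
 */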
struct nouveau_fence {
        struct nouveau_channel *channel;
        struct kref refcount;
        struct list_head entry;

        uint32_t sequence;
        bool signalled;
};

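/* Convert the opaque sync_obj pointer handed to us back into a fence. */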
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
        return (struct nouveau_fence *)sync_obj;
}

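/* kref release callback: frees the fence once its last reference is gone. */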
static void
nouveau_fence_del(struct kref *ref)
{
        struct nouveau_fence *fence =
                container_of(ref, struct nouveau_fence, refcount);

        kfree(fence);
}

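/*
 * Retire completed fences on a channel: read the most recently acknowledged
 * sequence number and signal/drop every pending fence up to and including
 * it.  Callers in this file hold chan->fence.lock around this.
 */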
void
nouveau_fence_update(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct list_head *entry, *tmp;
        struct nouveau_fence *fence;
        uint32_t sequence;

        if (USE_REFCNT)
                sequence = nvchan_rd32(chan, 0x48);
        else
                sequence = chan->fence.last_sequence_irq;

        if (chan->fence.sequence_ack == sequence)
                return;
        chan->fence.sequence_ack = sequence;

        list_for_each_safe(entry, tmp, &chan->fence.pending) {
                fence = list_entry(entry, struct nouveau_fence, entry);

                sequence = fence->sequence;
                fence->signalled = true;
                list_del(&fence->entry);
                kref_put(&fence->refcount, nouveau_fence_del);

                if (sequence == chan->fence.sequence_ack)
                        break;
        }
}

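/*
 * Allocate a fence on a channel and, if requested, emit it immediately.
 * If the emit fails the fence is released and *pfence is left NULL.
 *
 * A minimal usage sketch (the surrounding error handling is hypothetical,
 * not taken from this file):
 *
 *      struct nouveau_fence *fence;
 *      int ret = nouveau_fence_new(chan, &fence, true);
 *      if (ret)
 *              return ret;
 *      ret = nouveau_fence_wait(fence, NULL, false, true);
 *      nouveau_fence_unref((void *)&fence);
 */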
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
                  bool emit)
{
        struct nouveau_fence *fence;
        int ret = 0;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        kref_init(&fence->refcount);
        fence->channel = chan;

        if (emit)
                ret = nouveau_fence_emit(fence);

        if (ret)
                nouveau_fence_unref((void *)&fence);
        *pfence = fence;
        return ret;
}

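/* Return the channel a fence was created on, or NULL for a NULL fence. */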
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
        return fence ? fence->channel : NULL;
}

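/*
 * Emit a fence: assign it the channel's next sequence number, add it to the
 * pending list and write the sequence into the ring on the NvSubSw
 * subchannel (method 0x0050 on NV10+, 0x0150 on older cards).
 */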
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
        struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
        struct nouveau_channel *chan = fence->channel;
        unsigned long flags;
        int ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;

        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
                spin_lock_irqsave(&chan->fence.lock, flags);
                nouveau_fence_update(chan);
                spin_unlock_irqrestore(&chan->fence.lock, flags);

                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
        }

        fence->sequence = ++chan->fence.sequence;

        kref_get(&fence->refcount);
        spin_lock_irqsave(&chan->fence.lock, flags);
        list_add_tail(&fence->entry, &chan->fence.pending);
        spin_unlock_irqrestore(&chan->fence.lock, flags);

        BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
        OUT_RING(chan, fence->sequence);
        FIRE_RING(chan);

        return 0;
}

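/* Drop a reference on the fence behind *sync_obj, if any, and clear it. */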
void
nouveau_fence_unref(void **sync_obj)
{
        struct nouveau_fence *fence = nouveau_fence(*sync_obj);

        if (fence)
                kref_put(&fence->refcount, nouveau_fence_del);
        *sync_obj = NULL;
}

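/* Take an extra reference on the fence behind sync_obj. */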
void *
nouveau_fence_ref(void *sync_obj)
{
        struct nouveau_fence *fence = nouveau_fence(sync_obj);

        kref_get(&fence->refcount);
        return sync_obj;
}

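/*
 * Report whether a fence has signalled, retiring any newly completed
 * fences on its channel first if it has not.
 */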
bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
        struct nouveau_fence *fence = nouveau_fence(sync_obj);
        struct nouveau_channel *chan = fence->channel;
        unsigned long flags;

        if (fence->signalled)
                return true;

        spin_lock_irqsave(&chan->fence.lock, flags);
        nouveau_fence_update(chan);
        spin_unlock_irqrestore(&chan->fence.lock, flags);
        return fence->signalled;
}

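/*
 * Wait up to three seconds for a fence to signal.  With "lazy" set the wait
 * sleeps for a jiffy between polls, otherwise it busy-polls.  Returns
 * -EBUSY on timeout and -ERESTARTSYS if a signal arrives while "intr" is
 * set.
 */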
int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
        unsigned long timeout = jiffies + (3 * DRM_HZ);
        int ret = 0;

        __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

        while (1) {
                if (nouveau_fence_signalled(sync_obj, sync_arg))
                        break;

                if (time_after_eq(jiffies, timeout)) {
                        ret = -EBUSY;
                        break;
                }

                if (lazy)
                        schedule_timeout(1);

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);

        return ret;
}

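/* No-op: there is nothing to flush for a nouveau fence. */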
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}

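/*
 * Retire completed fences on the given channel index; out-of-range or
 * unused channel slots are ignored.
 */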
void
nouveau_fence_handler(struct drm_device *dev, int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;

        if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
                chan = dev_priv->fifos[channel];

        if (chan) {
                spin_lock_irq(&chan->fence.lock);
                nouveau_fence_update(chan);
                spin_unlock_irq(&chan->fence.lock);
        }
}

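/* Per-channel fence state setup: the pending list and the lock protecting it. */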
int
nouveau_fence_init(struct nouveau_channel *chan)
{
        INIT_LIST_HEAD(&chan->fence.pending);
        spin_lock_init(&chan->fence.lock);
        return 0;
}

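/*
 * Per-channel teardown: mark every still-pending fence signalled and drop
 * the pending list's references to them.
 */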
void
nouveau_fence_fini(struct nouveau_channel *chan)
{
        struct list_head *entry, *tmp;
        struct nouveau_fence *fence;

        list_for_each_safe(entry, tmp, &chan->fence.pending) {
                fence = list_entry(entry, struct nouveau_fence, entry);

                fence->signalled = true;
                list_del(&fence->entry);
                kref_put(&fence->refcount, nouveau_fence_del);
        }
}