V4L/DVB (9891): cx18: Replace magic number 63 with CX18_MAX_FW_MDLS_PER_STREAM
[safe/jmp/linux-2.6] drivers/media/video/cx18/cx18-queue.c
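For reference, the constant named in the subject stands in for the former magic number 63, the maximum number of MDLs the firmware will accept per stream (see the q_busy check in _cx18_enqueue below). A minimal sketch of the definition, assuming it lives in one of the cx18 headers; the exact header and comment wording are not shown by this file:

/* Firmware will queue at most this many MDLs per stream (formerly the magic 63) */
#define CX18_MAX_FW_MDLS_PER_STREAM 63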
/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@radix.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-streams.h"
#include "cx18-queue.h"
#include "cx18-scb.h"

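/* Byte swap the data in a buffer, one 32-bit word at a time */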
void cx18_buf_swap(struct cx18_buffer *buf)
{
        int i;

        for (i = 0; i < buf->bytesused; i += 4)
                swab32s((u32 *)(buf->buf + i));
}

void cx18_queue_init(struct cx18_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        atomic_set(&q->buffers, 0);
        q->bytesused = 0;
}

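/*
 * Add a buffer to a queue.  Returns the queue the buffer actually landed on:
 * enqueuing to q_busy falls back to q_free once the firmware's limit of
 * CX18_MAX_FW_MDLS_PER_STREAM has been reached.
 */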
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
                                 struct cx18_queue *q, int to_front)
{
        /* clear the buffer if it is not to be enqueued to the full queue */
        if (q != &s->q_full) {
                buf->bytesused = 0;
                buf->readpos = 0;
                buf->b_flags = 0;
                buf->skipped = 0;
        }

        mutex_lock(&s->qlock);

        /* q_busy is restricted to a max buffer count imposed by firmware */
        if (q == &s->q_busy &&
            atomic_read(&q->buffers) >= CX18_MAX_FW_MDLS_PER_STREAM)
                q = &s->q_free;

        if (to_front)
                list_add(&buf->list, &q->list); /* LIFO */
        else
                list_add_tail(&buf->list, &q->list); /* FIFO */
        q->bytesused += buf->bytesused - buf->readpos;
        atomic_inc(&q->buffers);

        mutex_unlock(&s->qlock);
        return q;
}

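/* Remove and return the buffer at the front of a queue, or NULL if empty */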
struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
        struct cx18_buffer *buf = NULL;

        mutex_lock(&s->qlock);
        if (!list_empty(&q->list)) {
                buf = list_entry(q->list.next, struct cx18_buffer, list);
                list_del_init(q->list.next);
                q->bytesused -= buf->bytesused - buf->readpos;
                buf->skipped = 0;
                atomic_dec(&q->buffers);
        }
        mutex_unlock(&s->qlock);
        return buf;
}

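/*
 * Find the buffer the firmware reported by id on q_busy, record its size,
 * and move it to q_full (TS buffers are simply taken off the queue).
 * Buffers skipped too many times are assumed to have dropped out of the
 * firmware's rotation and are returned to q_free.
 */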
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
        u32 bytesused)
{
        struct cx18 *cx = s->cx;
        struct cx18_buffer *buf;
        struct cx18_buffer *ret = NULL;
        struct list_head *p, *t;

        mutex_lock(&s->qlock);
        list_for_each_safe(p, t, &s->q_busy.list) {
                buf = list_entry(p, struct cx18_buffer, list);

                if (buf->id != id) {
                        buf->skipped++;
                        if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
                                /* buffer must have fallen out of rotation */
                                CX18_WARN("Skipped %s, buffer %d, %d "
                                          "times - it must have dropped out of "
                                          "rotation\n", s->name, buf->id,
                                          buf->skipped);
                                /* move it to q_free */
                                list_move_tail(&buf->list, &s->q_free.list);
                                buf->bytesused = buf->readpos = buf->b_flags =
                                        buf->skipped = 0;
                                atomic_dec(&s->q_busy.buffers);
                                atomic_inc(&s->q_free.buffers);
                        }
                        continue;
                }

                buf->bytesused = bytesused;
                /* Sync the buffer before we release the qlock */
                cx18_buf_sync_for_cpu(s, buf);
                if (s->type == CX18_ENC_STREAM_TYPE_TS) {
                        /*
                         * TS doesn't use q_full.  As we pull the buffer off of
                         * the queue here, the caller will have to put it back.
                         */
                        list_del_init(&buf->list);
                } else {
                        /* Move buffer from q_busy to q_full */
                        list_move_tail(&buf->list, &s->q_full.list);
                        set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
                        s->q_full.bytesused += buf->bytesused;
                        atomic_inc(&s->q_full.buffers);
                }
                atomic_dec(&s->q_busy.buffers);

                ret = buf;
                break;
        }
        mutex_unlock(&s->qlock);
        return ret;
}

/* Move all buffers of a queue to q_free, while flushing the buffers */
static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
{
        struct cx18_buffer *buf;

        if (q == &s->q_free)
                return;

        mutex_lock(&s->qlock);
        while (!list_empty(&q->list)) {
                buf = list_entry(q->list.next, struct cx18_buffer, list);
                list_move_tail(q->list.next, &s->q_free.list);
                buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
                atomic_inc(&s->q_free.buffers);
        }
        cx18_queue_init(q);
        mutex_unlock(&s->qlock);
}

void cx18_flush_queues(struct cx18_stream *s)
{
        cx18_queue_flush(s, &s->q_busy);
        cx18_queue_flush(s, &s->q_full);
}

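/*
 * Allocate, DMA map, and place all of a stream's buffers on q_free.
 * Fails if the buffers' MDL entries would not fit in the reserved SCB area.
 */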
int cx18_stream_alloc(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        int i;

        if (s->buffers == 0)
                return 0;

        CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%dkB total)\n",
                s->name, s->buffers, s->buf_size,
                s->buffers * s->buf_size / 1024);

        if (((char __iomem *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
                                (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
                unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
                                        ((char __iomem *)cx->scb->cpu_mdl));

                CX18_ERR("Too many buffers, cannot fit in SCB area\n");
                CX18_ERR("Max buffers = %zd\n",
                        bufsz / sizeof(struct cx18_mdl));
                return -ENOMEM;
        }

        s->mdl_offset = cx->mdl_offset;

        /* allocate stream buffers. Initially all buffers are in q_free. */
        for (i = 0; i < s->buffers; i++) {
                struct cx18_buffer *buf = kzalloc(sizeof(struct cx18_buffer),
                                                GFP_KERNEL|__GFP_NOWARN);

                if (buf == NULL)
                        break;
                buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
                if (buf->buf == NULL) {
                        kfree(buf);
                        break;
                }
                buf->id = cx->buffer_id++;
                INIT_LIST_HEAD(&buf->list);
                buf->dma_handle = pci_map_single(s->cx->dev,
                                buf->buf, s->buf_size, s->dma);
                cx18_buf_sync_for_cpu(s, buf);
                cx18_enqueue(s, buf, &s->q_free);
        }
        if (i == s->buffers) {
                cx->mdl_offset += s->buffers;
                return 0;
        }
        CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
        cx18_stream_free(s);
        return -ENOMEM;
}

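/* Return all of a stream's buffers to q_free, then unmap and free them */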
void cx18_stream_free(struct cx18_stream *s)
{
        struct cx18_buffer *buf;

        /* move all buffers to q_free */
        cx18_flush_queues(s);

        /* empty q_free */
        while ((buf = cx18_dequeue(s, &s->q_free))) {
                pci_unmap_single(s->cx->dev, buf->dma_handle,
                                s->buf_size, s->dma);
                kfree(buf->buf);
                kfree(buf);
        }
}