[POWERPC] spufs: check spu_acquire_runnable() return value
[safe/jmp/linux-2.6] / arch / powerpc / platforms / cell / spufs / file.c
1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <arndb@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #undef DEBUG
24
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31
32 #include <asm/io.h>
33 #include <asm/semaphore.h>
34 #include <asm/spu.h>
35 #include <asm/spu_info.h>
36 #include <asm/uaccess.h>
37
38 #include "spufs.h"
39
40 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
41
42 static int
43 spufs_mem_open(struct inode *inode, struct file *file)
44 {
45         struct spufs_inode_info *i = SPUFS_I(inode);
46         struct spu_context *ctx = i->i_ctx;
47
48         spin_lock(&ctx->mapping_lock);
49         file->private_data = ctx;
50         if (!i->i_openers++)
51                 ctx->local_store = inode->i_mapping;
52         spin_unlock(&ctx->mapping_lock);
53         smp_wmb();
54         return 0;
55 }
56
57 static int
58 spufs_mem_release(struct inode *inode, struct file *file)
59 {
60         struct spufs_inode_info *i = SPUFS_I(inode);
61         struct spu_context *ctx = i->i_ctx;
62
63         spin_lock(&ctx->mapping_lock);
64         if (!--i->i_openers)
65                 ctx->local_store = NULL;
66         spin_unlock(&ctx->mapping_lock);
67         smp_wmb();
68         return 0;
69 }
70
71 static ssize_t
72 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
73                         size_t size, loff_t *pos)
74 {
75         char *local_store = ctx->ops->get_ls(ctx);
76         return simple_read_from_buffer(buffer, size, pos, local_store,
77                                         LS_SIZE);
78 }
79
80 static ssize_t
81 spufs_mem_read(struct file *file, char __user *buffer,
82                                 size_t size, loff_t *pos)
83 {
84         struct spu_context *ctx = file->private_data;
85         ssize_t ret;
86
87         spu_acquire(ctx);
88         ret = __spufs_mem_read(ctx, buffer, size, pos);
89         spu_release(ctx);
90         return ret;
91 }
92
93 static ssize_t
94 spufs_mem_write(struct file *file, const char __user *buffer,
95                                         size_t size, loff_t *ppos)
96 {
97         struct spu_context *ctx = file->private_data;
98         char *local_store;
99         loff_t pos = *ppos;
100         int ret;
101
102         if (pos < 0)
103                 return -EINVAL;
104         if (pos > LS_SIZE)
105                 return -EFBIG;
106         if (size > LS_SIZE - pos)
107                 size = LS_SIZE - pos;
108
109         spu_acquire(ctx);
110         local_store = ctx->ops->get_ls(ctx);
111         ret = copy_from_user(local_store + pos, buffer, size);
112         spu_release(ctx);
113
114         if (ret)
115                 return -EFAULT;
116         *ppos = pos + size;
117         return size;
118 }
119
/*
 * Fault handler for "mem" mappings: resolve the faulting address to the
 * page backing the SPU local store.  When the context is saved, that is
 * the vmalloc'ed copy in the CSA; when it is running, it is the real
 * local store of the physical SPU.
 */
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;

	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		/* Saved state lives in normal memory: map it cacheable. */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* The physical local store must be mapped uncached. */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}


/* VM operations for "mem" mappings. */
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};
153
154 static int
155 spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
156 {
157         if (!(vma->vm_flags & VM_SHARED))
158                 return -EINVAL;
159
160         vma->vm_flags |= VM_IO | VM_PFNMAP;
161         vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
162                                      | _PAGE_NO_CACHE);
163
164         vma->vm_ops = &spufs_mem_mmap_vmops;
165         return 0;
166 }
167
/* File operations for the "mem" file exposing the SPU local store. */
static const struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.release = spufs_mem_release,
	.read	 = spufs_mem_read,
	.write	 = spufs_mem_write,
	.llseek	 = generic_file_llseek,
	.mmap	 = spufs_mem_mmap,
};
176
/*
 * Fault handler shared by all problem-state mappings: translate a fault
 * at @address into the physical page of the problem-state area slice
 * [@ps_offs, @ps_offs + @ps_size).  The context must be runnable, i.e.
 * bound to a physical SPU, before the area can be mapped, which is why
 * spu_acquire_runnable() is used instead of a plain spu_acquire().
 */
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* error here usually means a signal.. we might want to test
	 * the error code more precisely though
	 */
	/* On failure nothing was mapped; refault so the fault path can
	 * retry (or deliver the pending signal on the way out). */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}
203
#if SPUFS_MMAP_4K
/* Fault handler for the 4k control-area mapping (see spufs_ps_nopfn). */
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
/* With larger base pages the 4k control area cannot be mapped alone. */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
233
234 static u64 spufs_cntl_get(void *data)
235 {
236         struct spu_context *ctx = data;
237         u64 val;
238
239         spu_acquire(ctx);
240         val = ctx->ops->status_read(ctx);
241         spu_release(ctx);
242
243         return val;
244 }
245
246 static void spufs_cntl_set(void *data, u64 val)
247 {
248         struct spu_context *ctx = data;
249
250         spu_acquire(ctx);
251         ctx->ops->runcntl_write(ctx, val);
252         spu_release(ctx);
253 }
254
255 static int spufs_cntl_open(struct inode *inode, struct file *file)
256 {
257         struct spufs_inode_info *i = SPUFS_I(inode);
258         struct spu_context *ctx = i->i_ctx;
259
260         spin_lock(&ctx->mapping_lock);
261         file->private_data = ctx;
262         if (!i->i_openers++)
263                 ctx->cntl = inode->i_mapping;
264         spin_unlock(&ctx->mapping_lock);
265         smp_wmb();
266         return simple_attr_open(inode, file, spufs_cntl_get,
267                                         spufs_cntl_set, "0x%08lx");
268 }
269
270 static int
271 spufs_cntl_release(struct inode *inode, struct file *file)
272 {
273         struct spufs_inode_info *i = SPUFS_I(inode);
274         struct spu_context *ctx = i->i_ctx;
275
276         simple_attr_close(inode, file);
277
278         spin_lock(&ctx->mapping_lock);
279         if (!--i->i_openers)
280                 ctx->cntl = NULL;
281         spin_unlock(&ctx->mapping_lock);
282         smp_wmb();
283         return 0;
284 }
285
286 static const struct file_operations spufs_cntl_fops = {
287         .open = spufs_cntl_open,
288         .release = spufs_cntl_release,
289         .read = simple_attr_read,
290         .write = simple_attr_write,
291         .mmap = spufs_cntl_mmap,
292 };
293
294 static int
295 spufs_regs_open(struct inode *inode, struct file *file)
296 {
297         struct spufs_inode_info *i = SPUFS_I(inode);
298         file->private_data = i->i_ctx;
299         return 0;
300 }
301
302 static ssize_t
303 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
304                         size_t size, loff_t *pos)
305 {
306         struct spu_lscsa *lscsa = ctx->csa.lscsa;
307         return simple_read_from_buffer(buffer, size, pos,
308                                       lscsa->gprs, sizeof lscsa->gprs);
309 }
310
311 static ssize_t
312 spufs_regs_read(struct file *file, char __user *buffer,
313                 size_t size, loff_t *pos)
314 {
315         int ret;
316         struct spu_context *ctx = file->private_data;
317
318         spu_acquire_saved(ctx);
319         ret = __spufs_regs_read(ctx, buffer, size, pos);
320         spu_release(ctx);
321         return ret;
322 }
323
324 static ssize_t
325 spufs_regs_write(struct file *file, const char __user *buffer,
326                  size_t size, loff_t *pos)
327 {
328         struct spu_context *ctx = file->private_data;
329         struct spu_lscsa *lscsa = ctx->csa.lscsa;
330         int ret;
331
332         size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
333         if (size <= 0)
334                 return -EFBIG;
335         *pos += size;
336
337         spu_acquire_saved(ctx);
338
339         ret = copy_from_user(lscsa->gprs + *pos - size,
340                              buffer, size) ? -EFAULT : size;
341
342         spu_release(ctx);
343         return ret;
344 }
345
/* File operations for "regs": the saved SPU general-purpose registers. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
352
353 static ssize_t
354 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
355                         size_t size, loff_t * pos)
356 {
357         struct spu_lscsa *lscsa = ctx->csa.lscsa;
358         return simple_read_from_buffer(buffer, size, pos,
359                                       &lscsa->fpcr, sizeof(lscsa->fpcr));
360 }
361
362 static ssize_t
363 spufs_fpcr_read(struct file *file, char __user * buffer,
364                 size_t size, loff_t * pos)
365 {
366         int ret;
367         struct spu_context *ctx = file->private_data;
368
369         spu_acquire_saved(ctx);
370         ret = __spufs_fpcr_read(ctx, buffer, size, pos);
371         spu_release(ctx);
372         return ret;
373 }
374
375 static ssize_t
376 spufs_fpcr_write(struct file *file, const char __user * buffer,
377                  size_t size, loff_t * pos)
378 {
379         struct spu_context *ctx = file->private_data;
380         struct spu_lscsa *lscsa = ctx->csa.lscsa;
381         int ret;
382
383         size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
384         if (size <= 0)
385                 return -EFBIG;
386         *pos += size;
387
388         spu_acquire_saved(ctx);
389
390         ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
391                              buffer, size) ? -EFAULT : size;
392
393         spu_release(ctx);
394         return ret;
395 }
396
/* File operations for "fpcr": the saved FP status and control register. */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
403
404 /* generic open function for all pipe-like files */
405 static int spufs_pipe_open(struct inode *inode, struct file *file)
406 {
407         struct spufs_inode_info *i = SPUFS_I(inode);
408         file->private_data = i->i_ctx;
409
410         return nonseekable_open(inode, file);
411 }
412
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	/* Mailbox entries are 32 bit wide; shorter reads make no progress. */
	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		/* Mailbox drained: return what was read so far. */
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	/* Nothing was pending at all: report -EAGAIN. */
	if (!count)
		count = -EAGAIN;

	return count;
}

/* File operations for the (non-blocking) outbound mailbox. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
467
468 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
469                         size_t len, loff_t *pos)
470 {
471         struct spu_context *ctx = file->private_data;
472         u32 mbox_stat;
473
474         if (len < 4)
475                 return -EINVAL;
476
477         spu_acquire(ctx);
478
479         mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
480
481         spu_release(ctx);
482
483         if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
484                 return -EFAULT;
485
486         return 4;
487 }
488
489 static const struct file_operations spufs_mbox_stat_fops = {
490         .open   = spufs_pipe_open,
491         .read   = spufs_mbox_stat_read,
492 };
493
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	/* Returns the number of elements read: 0 when the ibox is empty. */
	return ctx->ops->ibox_read(ctx, data);
}

/* Register/unregister this file for SIGIO on incoming ibox data. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* Wake sleeping readers/pollers and notify fasync listeners. */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
515
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	/* Entries are 32 bit wide; shorter reads make no progress. */
	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		/* spufs_wait returns 0 on success or -ERESTARTSYS etc. */
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	/* Drain further pending entries without blocking. */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}
581
582 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
583 {
584         struct spu_context *ctx = file->private_data;
585         unsigned int mask;
586
587         poll_wait(file, &ctx->ibox_wq, wait);
588
589         spu_acquire(ctx);
590         mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
591         spu_release(ctx);
592
593         return mask;
594 }
595
596 static const struct file_operations spufs_ibox_fops = {
597         .open   = spufs_pipe_open,
598         .read   = spufs_ibox_read,
599         .poll   = spufs_ibox_poll,
600         .fasync = spufs_ibox_fasync,
601 };
602
603 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
604                         size_t len, loff_t *pos)
605 {
606         struct spu_context *ctx = file->private_data;
607         u32 ibox_stat;
608
609         if (len < 4)
610                 return -EINVAL;
611
612         spu_acquire(ctx);
613         ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
614         spu_release(ctx);
615
616         if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
617                 return -EFAULT;
618
619         return 4;
620 }
621
622 static const struct file_operations spufs_ibox_stat_fops = {
623         .open   = spufs_pipe_open,
624         .read   = spufs_ibox_stat_read,
625 };
626
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	/* Returns the number of elements written: 0 when the wbox is full. */
	return ctx->ops->wbox_write(ctx, data);
}
632
633 static int spufs_wbox_fasync(int fd, struct file *file, int on)
634 {
635         struct spu_context *ctx = file->private_data;
636         int ret;
637
638         ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
639
640         return ret;
641 }
642
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* Wake sleeping writers/pollers and notify fasync listeners. */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
651
652 /*
653  * Write as many bytes to the interrupt mailbox as possible, until
654  * one of the conditions becomes true:
655  *
656  * - the mailbox is full
657  * - end of the user provided buffer
658  * - end of the mapped area
659  *
660  * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
662  * write something.
663  */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	/* Entries are 32 bit wide; shorter writes make no progress. */
	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	/* Fetch the first element before taking the context lock. */
	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		/* Stop without error when the mailbox fills up. */
		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}
714
715 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
716 {
717         struct spu_context *ctx = file->private_data;
718         unsigned int mask;
719
720         poll_wait(file, &ctx->wbox_wq, wait);
721
722         spu_acquire(ctx);
723         mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
724         spu_release(ctx);
725
726         return mask;
727 }
728
729 static const struct file_operations spufs_wbox_fops = {
730         .open   = spufs_pipe_open,
731         .write  = spufs_wbox_write,
732         .poll   = spufs_wbox_poll,
733         .fasync = spufs_wbox_fasync,
734 };
735
736 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
737                         size_t len, loff_t *pos)
738 {
739         struct spu_context *ctx = file->private_data;
740         u32 wbox_stat;
741
742         if (len < 4)
743                 return -EINVAL;
744
745         spu_acquire(ctx);
746         wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
747         spu_release(ctx);
748
749         if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
750                 return -EFAULT;
751
752         return 4;
753 }
754
755 static const struct file_operations spufs_wbox_stat_fops = {
756         .open   = spufs_pipe_open,
757         .read   = spufs_wbox_stat_read,
758 };
759
760 static int spufs_signal1_open(struct inode *inode, struct file *file)
761 {
762         struct spufs_inode_info *i = SPUFS_I(inode);
763         struct spu_context *ctx = i->i_ctx;
764
765         spin_lock(&ctx->mapping_lock);
766         file->private_data = ctx;
767         if (!i->i_openers++)
768                 ctx->signal1 = inode->i_mapping;
769         spin_unlock(&ctx->mapping_lock);
770         smp_wmb();
771         return nonseekable_open(inode, file);
772 }
773
774 static int
775 spufs_signal1_release(struct inode *inode, struct file *file)
776 {
777         struct spufs_inode_info *i = SPUFS_I(inode);
778         struct spu_context *ctx = i->i_ctx;
779
780         spin_lock(&ctx->mapping_lock);
781         if (!--i->i_openers)
782                 ctx->signal1 = NULL;
783         spin_unlock(&ctx->mapping_lock);
784         smp_wmb();
785         return 0;
786 }
787
788 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
789                         size_t len, loff_t *pos)
790 {
791         int ret = 0;
792         u32 data;
793
794         if (len < 4)
795                 return -EINVAL;
796
797         if (ctx->csa.spu_chnlcnt_RW[3]) {
798                 data = ctx->csa.spu_chnldata_RW[3];
799                 ret = 4;
800         }
801
802         if (!ret)
803                 goto out;
804
805         if (copy_to_user(buf, &data, 4))
806                 return -EFAULT;
807
808 out:
809         return ret;
810 }
811
812 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
813                         size_t len, loff_t *pos)
814 {
815         int ret;
816         struct spu_context *ctx = file->private_data;
817
818         spu_acquire_saved(ctx);
819         ret = __spufs_signal1_read(ctx, buf, len, pos);
820         spu_release(ctx);
821
822         return ret;
823 }
824
825 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
826                         size_t len, loff_t *pos)
827 {
828         struct spu_context *ctx;
829         u32 data;
830
831         ctx = file->private_data;
832
833         if (len < 4)
834                 return -EINVAL;
835
836         if (copy_from_user(&data, buf, 4))
837                 return -EFAULT;
838
839         spu_acquire(ctx);
840         ctx->ops->signal1_write(ctx, data);
841         spu_release(ctx);
842
843         return 4;
844 }
845
/* Fault handler for signal1 mmaps; the mapped range depends on page size. */
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

/* mmap support for the signal1 problem-state area; shared mappings only. */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
885
886 static int spufs_signal2_open(struct inode *inode, struct file *file)
887 {
888         struct spufs_inode_info *i = SPUFS_I(inode);
889         struct spu_context *ctx = i->i_ctx;
890
891         spin_lock(&ctx->mapping_lock);
892         file->private_data = ctx;
893         if (!i->i_openers++)
894                 ctx->signal2 = inode->i_mapping;
895         spin_unlock(&ctx->mapping_lock);
896         smp_wmb();
897         return nonseekable_open(inode, file);
898 }
899
900 static int
901 spufs_signal2_release(struct inode *inode, struct file *file)
902 {
903         struct spufs_inode_info *i = SPUFS_I(inode);
904         struct spu_context *ctx = i->i_ctx;
905
906         spin_lock(&ctx->mapping_lock);
907         if (!--i->i_openers)
908                 ctx->signal2 = NULL;
909         spin_unlock(&ctx->mapping_lock);
910         smp_wmb();
911         return 0;
912 }
913
914 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
915                         size_t len, loff_t *pos)
916 {
917         int ret = 0;
918         u32 data;
919
920         if (len < 4)
921                 return -EINVAL;
922
923         if (ctx->csa.spu_chnlcnt_RW[4]) {
924                 data =  ctx->csa.spu_chnldata_RW[4];
925                 ret = 4;
926         }
927
928         if (!ret)
929                 goto out;
930
931         if (copy_to_user(buf, &data, 4))
932                 return -EFAULT;
933
934 out:
935         return ret;
936 }
937
938 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
939                         size_t len, loff_t *pos)
940 {
941         struct spu_context *ctx = file->private_data;
942         int ret;
943
944         spu_acquire_saved(ctx);
945         ret = __spufs_signal2_read(ctx, buf, len, pos);
946         spu_release(ctx);
947
948         return ret;
949 }
950
951 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
952                         size_t len, loff_t *pos)
953 {
954         struct spu_context *ctx;
955         u32 data;
956
957         ctx = file->private_data;
958
959         if (len < 4)
960                 return -EINVAL;
961
962         if (copy_from_user(&data, buf, 4))
963                 return -EFAULT;
964
965         spu_acquire(ctx);
966         ctx->ops->signal2_write(ctx, data);
967         spu_release(ctx);
968
969         return 4;
970 }
971
#if SPUFS_MMAP_4K
/* Fault handler for signal2 mmaps; the mapped range depends on page size. */
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

/* mmap support for the signal2 problem-state area; shared mappings only. */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
1015
1016 static void spufs_signal1_type_set(void *data, u64 val)
1017 {
1018         struct spu_context *ctx = data;
1019
1020         spu_acquire(ctx);
1021         ctx->ops->signal1_type_set(ctx, val);
1022         spu_release(ctx);
1023 }
1024
1025 static u64 __spufs_signal1_type_get(void *data)
1026 {
1027         struct spu_context *ctx = data;
1028         return ctx->ops->signal1_type_get(ctx);
1029 }
1030
1031 static u64 spufs_signal1_type_get(void *data)
1032 {
1033         struct spu_context *ctx = data;
1034         u64 ret;
1035
1036         spu_acquire(ctx);
1037         ret = __spufs_signal1_type_get(data);
1038         spu_release(ctx);
1039
1040         return ret;
1041 }
1042 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1043                                         spufs_signal1_type_set, "%llu");
1044
1045 static void spufs_signal2_type_set(void *data, u64 val)
1046 {
1047         struct spu_context *ctx = data;
1048
1049         spu_acquire(ctx);
1050         ctx->ops->signal2_type_set(ctx, val);
1051         spu_release(ctx);
1052 }
1053
1054 static u64 __spufs_signal2_type_get(void *data)
1055 {
1056         struct spu_context *ctx = data;
1057         return ctx->ops->signal2_type_get(ctx);
1058 }
1059
1060 static u64 spufs_signal2_type_get(void *data)
1061 {
1062         struct spu_context *ctx = data;
1063         u64 ret;
1064
1065         spu_acquire(ctx);
1066         ret = __spufs_signal2_type_get(data);
1067         spu_release(ctx);
1068
1069         return ret;
1070 }
1071 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1072                                         spufs_signal2_type_set, "%llu");
1073
1074 #if SPUFS_MMAP_4K
/* Fault handler: maps the 4k problem-state page at offset 0. */
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};
1084
1085 /*
1086  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1087  */
1088 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1089 {
1090         if (!(vma->vm_flags & VM_SHARED))
1091                 return -EINVAL;
1092
1093         vma->vm_flags |= VM_IO | VM_PFNMAP;
1094         vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1095                                      | _PAGE_NO_CACHE | _PAGE_GUARDED);
1096
1097         vma->vm_ops = &spufs_mss_mmap_vmops;
1098         return 0;
1099 }
1100 #else /* SPUFS_MMAP_4K */
1101 #define spufs_mss_mmap NULL
1102 #endif /* !SPUFS_MMAP_4K */
1103
1104 static int spufs_mss_open(struct inode *inode, struct file *file)
1105 {
1106         struct spufs_inode_info *i = SPUFS_I(inode);
1107         struct spu_context *ctx = i->i_ctx;
1108
1109         file->private_data = i->i_ctx;
1110
1111         spin_lock(&ctx->mapping_lock);
1112         if (!i->i_openers++)
1113                 ctx->mss = inode->i_mapping;
1114         spin_unlock(&ctx->mapping_lock);
1115         smp_wmb();
1116         return nonseekable_open(inode, file);
1117 }
1118
1119 static int
1120 spufs_mss_release(struct inode *inode, struct file *file)
1121 {
1122         struct spufs_inode_info *i = SPUFS_I(inode);
1123         struct spu_context *ctx = i->i_ctx;
1124
1125         spin_lock(&ctx->mapping_lock);
1126         if (!--i->i_openers)
1127                 ctx->mss = NULL;
1128         spin_unlock(&ctx->mapping_lock);
1129         smp_wmb();
1130         return 0;
1131 }
1132
/* File operations for the per-context "mss" file. */
static const struct file_operations spufs_mss_fops = {
	.open    = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap    = spufs_mss_mmap,
};
1138
/* Fault handler: maps the whole 128k problem-state area. */
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};
1148
1149 /*
1150  * mmap support for full problem state area [0x00000 - 0x1ffff].
1151  */
1152 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1153 {
1154         if (!(vma->vm_flags & VM_SHARED))
1155                 return -EINVAL;
1156
1157         vma->vm_flags |= VM_IO | VM_PFNMAP;
1158         vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1159                                      | _PAGE_NO_CACHE | _PAGE_GUARDED);
1160
1161         vma->vm_ops = &spufs_psmap_mmap_vmops;
1162         return 0;
1163 }
1164
1165 static int spufs_psmap_open(struct inode *inode, struct file *file)
1166 {
1167         struct spufs_inode_info *i = SPUFS_I(inode);
1168         struct spu_context *ctx = i->i_ctx;
1169
1170         spin_lock(&ctx->mapping_lock);
1171         file->private_data = i->i_ctx;
1172         if (!i->i_openers++)
1173                 ctx->psmap = inode->i_mapping;
1174         spin_unlock(&ctx->mapping_lock);
1175         smp_wmb();
1176         return nonseekable_open(inode, file);
1177 }
1178
1179 static int
1180 spufs_psmap_release(struct inode *inode, struct file *file)
1181 {
1182         struct spufs_inode_info *i = SPUFS_I(inode);
1183         struct spu_context *ctx = i->i_ctx;
1184
1185         spin_lock(&ctx->mapping_lock);
1186         if (!--i->i_openers)
1187                 ctx->psmap = NULL;
1188         spin_unlock(&ctx->mapping_lock);
1189         smp_wmb();
1190         return 0;
1191 }
1192
/* File operations for the per-context "psmap" file. */
static const struct file_operations spufs_psmap_fops = {
	.open    = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap    = spufs_psmap_mmap,
};
1198
1199
1200 #if SPUFS_MMAP_4K
/* Fault handler: maps the 4k MFC page at problem-state offset 0x3000. */
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};
1210
/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 * (The old comment said [0x0000 - 0x0fff], but the nopfn handler
 * maps offset 0x3000, size 0x1000.)
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
1226 #else /* SPUFS_MMAP_4K */
1227 #define spufs_mfc_mmap NULL
1228 #endif /* !SPUFS_MMAP_4K */
1229
1230 static int spufs_mfc_open(struct inode *inode, struct file *file)
1231 {
1232         struct spufs_inode_info *i = SPUFS_I(inode);
1233         struct spu_context *ctx = i->i_ctx;
1234
1235         /* we don't want to deal with DMA into other processes */
1236         if (ctx->owner != current->mm)
1237                 return -EINVAL;
1238
1239         if (atomic_read(&inode->i_count) != 1)
1240                 return -EBUSY;
1241
1242         spin_lock(&ctx->mapping_lock);
1243         file->private_data = ctx;
1244         if (!i->i_openers++)
1245                 ctx->mfc = inode->i_mapping;
1246         spin_unlock(&ctx->mapping_lock);
1247         smp_wmb();
1248         return nonseekable_open(inode, file);
1249 }
1250
1251 static int
1252 spufs_mfc_release(struct inode *inode, struct file *file)
1253 {
1254         struct spufs_inode_info *i = SPUFS_I(inode);
1255         struct spu_context *ctx = i->i_ctx;
1256
1257         spin_lock(&ctx->mapping_lock);
1258         if (!--i->i_openers)
1259                 ctx->mfc = NULL;
1260         spin_unlock(&ctx->mapping_lock);
1261         smp_wmb();
1262         return 0;
1263 }
1264
/*
 * interrupt-level mfc callback function.
 *
 * Runs in interrupt context when an MFC event arrives for this
 * context: wakes all sleepers on mfc_wq and, if SIGIO delivery was
 * requested via fasync, builds a poll mask from the current queue
 * space and tag status and signals the owner.
 */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* POLLOUT: room in the command queue; POLLIN: an awaited
		 * tag group completed. */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
1290
/*
 * Check whether any awaited tag group (ctx->tagwait) has completed.
 * Completed groups are cleared from tagwait and reported via *status.
 * Returns 1 when there is something to report; otherwise (re)arms the
 * tag query and returns 0 so the spufs_wait() caller keeps sleeping.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1305
1306 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1307                         size_t size, loff_t *pos)
1308 {
1309         struct spu_context *ctx = file->private_data;
1310         int ret = -EINVAL;
1311         u32 status;
1312
1313         if (size != 4)
1314                 goto out;
1315
1316         spu_acquire(ctx);
1317         if (file->f_flags & O_NONBLOCK) {
1318                 status = ctx->ops->read_mfc_tagstatus(ctx);
1319                 if (!(status & ctx->tagwait))
1320                         ret = -EAGAIN;
1321                 else
1322                         ctx->tagwait &= ~status;
1323         } else {
1324                 ret = spufs_wait(ctx->mfc_wq,
1325                            spufs_read_mfc_tagstatus(ctx, &status));
1326         }
1327         spu_release(ctx);
1328
1329         if (ret)
1330                 goto out;
1331
1332         ret = 4;
1333         if (copy_to_user(buffer, &status, 4))
1334                 ret = -EFAULT;
1335
1336 out:
1337         return ret;
1338 }
1339
/*
 * Validate a user-supplied MFC DMA command before it is queued.
 * Only plain get/put commands (with optional fence/barrier) are
 * accepted; lsa and ea must agree in their low four bits, the size
 * field must have a valid alignment class, transfers are limited to
 * 16k, tags to 0..15, and the class field must be zero.
 * Returns 0 when acceptable, -EIO otherwise.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	/* The low nibble of size selects the alignment requirement on
	 * lsa; 0 means 16-byte multiples.  Note the goto jumps into the
	 * switch to share the default error path. */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
1409
/*
 * Try to enqueue one MFC command.  If the queue is full (-EAGAIN),
 * arm the tag query interrupt and retry once; if it still does not
 * fit, return 0 so the spufs_wait() caller keeps sleeping.  Returns
 * 1 when finished, with the final result code in *error.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1427
/*
 * Queue a DMA command on behalf of userspace.  The write must contain
 * exactly one struct mfc_dma_command, which is validated before the
 * context is made runnable.  With O_NONBLOCK the queue-full result is
 * passed straight back; otherwise we sleep until the command can be
 * enqueued.  On success the command's tag group is added to
 * ctx->tagwait and the full command size is returned.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	/* the context must be loaded before commands can be queued */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out:
	return ret;
}
1470
/*
 * Poll the mfc file: POLLOUT when there is room in the command queue,
 * POLLIN when one of the awaited tag groups has completed.  The tag
 * query is (re)armed first so a later completion wakes mfc_wq.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1496
/*
 * Flush is meant to wait for all outstanding DMA of this context to
 * complete.  The real implementation is compiled out because it
 * currently hangs; until that is fixed, flush succeeds immediately.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}
1519
/* fsync on the mfc file is the same as flush: wait for DMA completion. */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}
1525
/* Register/unregister for SIGIO delivery from spufs_mfc_callback(). */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1532
/* File operations for the per-context "mfc" (DMA command) file. */
static const struct file_operations spufs_mfc_fops = {
	.open    = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read    = spufs_mfc_read,
	.write   = spufs_mfc_write,
	.poll    = spufs_mfc_poll,
	.flush   = spufs_mfc_flush,
	.fsync   = spufs_mfc_fsync,
	.fasync  = spufs_mfc_fasync,
	.mmap    = spufs_mfc_mmap,
};
1544
1545 static void spufs_npc_set(void *data, u64 val)
1546 {
1547         struct spu_context *ctx = data;
1548         spu_acquire(ctx);
1549         ctx->ops->npc_write(ctx, val);
1550         spu_release(ctx);
1551 }
1552
1553 static u64 spufs_npc_get(void *data)
1554 {
1555         struct spu_context *ctx = data;
1556         u64 ret;
1557         spu_acquire(ctx);
1558         ret = ctx->ops->npc_read(ctx);
1559         spu_release(ctx);
1560         return ret;
1561 }
1562 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1563                         "0x%llx\n")
1564
1565 static void spufs_decr_set(void *data, u64 val)
1566 {
1567         struct spu_context *ctx = data;
1568         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1569         spu_acquire_saved(ctx);
1570         lscsa->decr.slot[0] = (u32) val;
1571         spu_release(ctx);
1572 }
1573
1574 static u64 __spufs_decr_get(void *data)
1575 {
1576         struct spu_context *ctx = data;
1577         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1578         return lscsa->decr.slot[0];
1579 }
1580
1581 static u64 spufs_decr_get(void *data)
1582 {
1583         struct spu_context *ctx = data;
1584         u64 ret;
1585         spu_acquire_saved(ctx);
1586         ret = __spufs_decr_get(data);
1587         spu_release(ctx);
1588         return ret;
1589 }
1590 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1591                         "0x%llx\n")
1592
1593 static void spufs_decr_status_set(void *data, u64 val)
1594 {
1595         struct spu_context *ctx = data;
1596         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1597         spu_acquire_saved(ctx);
1598         lscsa->decr_status.slot[0] = (u32) val;
1599         spu_release(ctx);
1600 }
1601
1602 static u64 __spufs_decr_status_get(void *data)
1603 {
1604         struct spu_context *ctx = data;
1605         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1606         return lscsa->decr_status.slot[0];
1607 }
1608
1609 static u64 spufs_decr_status_get(void *data)
1610 {
1611         struct spu_context *ctx = data;
1612         u64 ret;
1613         spu_acquire_saved(ctx);
1614         ret = __spufs_decr_status_get(data);
1615         spu_release(ctx);
1616         return ret;
1617 }
1618 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1619                         spufs_decr_status_set, "0x%llx\n")
1620
1621 static void spufs_event_mask_set(void *data, u64 val)
1622 {
1623         struct spu_context *ctx = data;
1624         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1625         spu_acquire_saved(ctx);
1626         lscsa->event_mask.slot[0] = (u32) val;
1627         spu_release(ctx);
1628 }
1629
1630 static u64 __spufs_event_mask_get(void *data)
1631 {
1632         struct spu_context *ctx = data;
1633         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1634         return lscsa->event_mask.slot[0];
1635 }
1636
1637 static u64 spufs_event_mask_get(void *data)
1638 {
1639         struct spu_context *ctx = data;
1640         u64 ret;
1641         spu_acquire_saved(ctx);
1642         ret = __spufs_event_mask_get(data);
1643         spu_release(ctx);
1644         return ret;
1645 }
1646 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1647                         spufs_event_mask_set, "0x%llx\n")
1648
1649 static u64 __spufs_event_status_get(void *data)
1650 {
1651         struct spu_context *ctx = data;
1652         struct spu_state *state = &ctx->csa;
1653         u64 stat;
1654         stat = state->spu_chnlcnt_RW[0];
1655         if (stat)
1656                 return state->spu_chnldata_RW[0];
1657         return 0;
1658 }
1659
1660 static u64 spufs_event_status_get(void *data)
1661 {
1662         struct spu_context *ctx = data;
1663         u64 ret = 0;
1664
1665         spu_acquire_saved(ctx);
1666         ret = __spufs_event_status_get(data);
1667         spu_release(ctx);
1668         return ret;
1669 }
1670 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1671                         NULL, "0x%llx\n")
1672
1673 static void spufs_srr0_set(void *data, u64 val)
1674 {
1675         struct spu_context *ctx = data;
1676         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1677         spu_acquire_saved(ctx);
1678         lscsa->srr0.slot[0] = (u32) val;
1679         spu_release(ctx);
1680 }
1681
1682 static u64 spufs_srr0_get(void *data)
1683 {
1684         struct spu_context *ctx = data;
1685         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1686         u64 ret;
1687         spu_acquire_saved(ctx);
1688         ret = lscsa->srr0.slot[0];
1689         spu_release(ctx);
1690         return ret;
1691 }
1692 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1693                         "0x%llx\n")
1694
1695 static u64 spufs_id_get(void *data)
1696 {
1697         struct spu_context *ctx = data;
1698         u64 num;
1699
1700         spu_acquire(ctx);
1701         if (ctx->state == SPU_STATE_RUNNABLE)
1702                 num = ctx->spu->number;
1703         else
1704                 num = (unsigned int)-1;
1705         spu_release(ctx);
1706
1707         return num;
1708 }
1709 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1710
/*
 * object-id: an opaque per-context cookie stored on behalf of
 * userspace.  Read and written without taking the context lock.
 */
static u64 __spufs_object_id_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->object_id;
}

static u64 spufs_object_id_get(void *data)
{
	/* FIXME: Should there really be no locking here? */
	return __spufs_object_id_get(data);
}

static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		spufs_object_id_set, "0x%llx\n");
1731
1732 static u64 __spufs_lslr_get(void *data)
1733 {
1734         struct spu_context *ctx = data;
1735         return ctx->csa.priv2.spu_lslr_RW;
1736 }
1737
1738 static u64 spufs_lslr_get(void *data)
1739 {
1740         struct spu_context *ctx = data;
1741         u64 ret;
1742
1743         spu_acquire_saved(ctx);
1744         ret = __spufs_lslr_get(data);
1745         spu_release(ctx);
1746
1747         return ret;
1748 }
1749 DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1750
1751 static int spufs_info_open(struct inode *inode, struct file *file)
1752 {
1753         struct spufs_inode_info *i = SPUFS_I(inode);
1754         struct spu_context *ctx = i->i_ctx;
1755         file->private_data = ctx;
1756         return 0;
1757 }
1758
1759 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1760                         char __user *buf, size_t len, loff_t *pos)
1761 {
1762         u32 mbox_stat;
1763         u32 data;
1764
1765         mbox_stat = ctx->csa.prob.mb_stat_R;
1766         if (mbox_stat & 0x0000ff) {
1767                 data = ctx->csa.prob.pu_mb_R;
1768         }
1769
1770         return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1771 }
1772
/*
 * Read the saved PU mailbox state.  spu_acquire_saved() forces the
 * context into the saved state so the CSA contents are current;
 * register_lock serializes against other CSA accessors.
 *
 * NOTE(review): __spufs_mbox_info_read() ends in
 * simple_read_from_buffer(), i.e. copy_to_user() runs with
 * register_lock held; a page fault here would sleep under a
 * spinlock — verify.
 */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1790
/* File operations for the read-only "mbox_info" file. */
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
1796
1797 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1798                                 char __user *buf, size_t len, loff_t *pos)
1799 {
1800         u32 ibox_stat;
1801         u32 data;
1802
1803         ibox_stat = ctx->csa.prob.mb_stat_R;
1804         if (ibox_stat & 0xff0000) {
1805                 data = ctx->csa.priv2.puint_mb_R;
1806         }
1807
1808         return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1809 }
1810
/*
 * Read the saved interrupt-mailbox state of a saved context under
 * register_lock.
 *
 * NOTE(review): copy_to_user() runs with register_lock held here as
 * well — same concern as spufs_mbox_info_read().
 */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1828
/* File operations for the read-only "ibox_info" file. */
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
1834
/*
 * Copy the saved SPU write-mailbox entries to userspace.  Bits 8-15 of
 * the saved mb_stat_R hold the number of free wbox slots, so 4 minus
 * that is the number of queued entries in spu_mailbox_data[].
 * Caller holds the context in saved state and holds register_lock.
 */
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	/* assumes the free-count field is always in 0..4 — TODO confirm */
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}
1851
/*
 * Read the saved write-mailbox state of a saved context under
 * register_lock.
 *
 * NOTE(review): copy_to_user() runs with register_lock held here as
 * well — same concern as spufs_mbox_info_read().
 */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1869
/* File operations for the read-only "wbox_info" file. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
1875
/*
 * Fill a struct spu_dma_info from the saved context state: the tag
 * query type/mask, DMA status words from the saved channel data, and
 * the 16 saved SPU command-queue entries.  Caller holds the context
 * in saved state and holds register_lock.
 */
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
1901
/*
 * Read the saved DMA queue state of a saved context under
 * register_lock.
 *
 * NOTE(review): copy_to_user() runs with register_lock held here as
 * well — same concern as spufs_mbox_info_read().
 */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1919
1920 static const struct file_operations spufs_dma_info_fops = {
1921         .open = spufs_info_open,
1922         .read = spufs_dma_info_read,
1923 };
1924
/*
 * Fill a struct spu_proxydma_info from the saved problem-state
 * registers and the 8 PU-side (proxy) command-queue entries.
 * Rejects buffers smaller than the structure and unwritable user
 * pointers.  Caller holds the context in saved state and holds
 * register_lock.
 */
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
1955
/*
 * Read the saved proxy-DMA state of a saved context under
 * register_lock.  The user-buffer checks (size, access_ok) are done
 * inside __spufs_proxydma_info_read().
 *
 * NOTE(review): copy_to_user() runs with register_lock held here as
 * well — same concern as spufs_mbox_info_read().
 */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1970
1971 static const struct file_operations spufs_proxydma_info_fops = {
1972         .open = spufs_info_open,
1973         .read = spufs_proxydma_info_read,
1974 };
1975
/*
 * Files created in each context directory for normally scheduled
 * contexts.  The mode bits give the intended access; the semantics
 * live in the individual file_operations above.
 */
struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{},
};
2010
/*
 * Files created in a non-scheduled SPU context directory.  A subset of
 * spufs_dir_contents: entries that would require scheduling the context
 * (e.g. the register-state files) are omitted.  Terminated by an empty
 * sentinel entry.
 */
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{},
};
2032
/*
 * Per-context data dumped as notes into an ELF core file.  Each entry
 * names a spufs file and supplies either a read callback or a get
 * callback, plus the note size in bytes.  Terminated by an empty
 * sentinel entry.
 */
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, 128 * 16 },
	{ "fpcr", __spufs_fpcr_read, NULL, 16 },
	{ "lslr", NULL, __spufs_lslr_get, 11 },
	{ "decr", NULL, __spufs_decr_get, 11 },
	{ "decr_status", NULL, __spufs_decr_status_get, 11 },
	{ "mem", __spufs_mem_read, NULL, 256 * 1024, },
	{ "signal1", __spufs_signal1_read, NULL, 4 },
	{ "signal1_type", NULL, __spufs_signal1_type_get, 2 },
	{ "signal2", __spufs_signal2_read, NULL, 4 },
	{ "signal2_type", NULL, __spufs_signal2_type_get, 2 },
	{ "event_mask", NULL, __spufs_event_mask_get, 8 },
	{ "event_status", NULL, __spufs_event_status_get, 8 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, 4 },
	{ "ibox_info", __spufs_ibox_info_read, NULL, 4 },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 16 },
	{ "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
	{ "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
	{ "object-id", NULL, __spufs_object_id_get, 19 },
	{ },
};
/* Number of coredump notes, excluding the terminating sentinel. */
int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
2055