2 * SPU file system -- file contents
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
33 #include <asm/semaphore.h>
35 #include <asm/spu_info.h>
36 #include <asm/uaccess.h>
40 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/*
 * NOTE(review): this chunk is an extraction of spufs file.c with original
 * line numbers embedded and many lines elided; code below is kept
 * byte-identical, only review comments are added.
 */
/* open of the "mem" file: stash the context and publish the local-store
 * address_space on both the file and the context */
43 spufs_mem_open(struct inode *inode, struct file *file)
45 struct spufs_inode_info *i = SPUFS_I(inode);
46 struct spu_context *ctx = i->i_ctx;
47 file->private_data = ctx;
48 file->f_mapping = inode->i_mapping;
49 ctx->local_store = inode->i_mapping;
/* copy from the SPU local store to user space; caller is expected to hold
 * the context (the acquire/release lines are elided in this view) */
54 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
55 size_t size, loff_t *pos)
57 char *local_store = ctx->ops->get_ls(ctx);
58 return simple_read_from_buffer(buffer, size, pos, local_store,
/* file-level read wrapper around __spufs_mem_read */
63 spufs_mem_read(struct file *file, char __user *buffer,
64 size_t size, loff_t *pos)
67 struct spu_context *ctx = file->private_data;
70 ret = __spufs_mem_read(ctx, buffer, size, pos);
/* write into local store; size is clamped to LS_SIZE - *pos.  The copy
 * destination uses *pos - size, which implies *pos was already advanced
 * by size on an elided line — TODO confirm against the full source */
76 spufs_mem_write(struct file *file, const char __user *buffer,
77 size_t size, loff_t *pos)
79 struct spu_context *ctx = file->private_data;
83 size = min_t(ssize_t, LS_SIZE - *pos, size);
90 local_store = ctx->ops->get_ls(ctx);
91 ret = copy_from_user(local_store + *pos - size,
92 buffer, size) ? -EFAULT : size;
/* nopfn fault handler for the local-store mapping: when the context is
 * saved, map the vmalloc'ed CSA copy of the local store; otherwise map
 * the physical local store of the SPU the context is running on.  The
 * page-protection OR terms are on elided lines. */
98 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
99 unsigned long address)
101 struct spu_context *ctx = vma->vm_file->private_data;
102 unsigned long pfn, offset = address - vma->vm_start;
104 offset += vma->vm_pgoff << PAGE_SHIFT;
108 if (ctx->state == SPU_STATE_SAVED) {
109 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
111 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
113 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
115 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
117 vm_insert_pfn(vma, address, pfn);
121 return NOPFN_REFAULT;
125 static struct vm_operations_struct spufs_mem_mmap_vmops = {
126 .nopfn = spufs_mem_mmap_nopfn,
/* mmap of "mem": only MAP_SHARED mappings are allowed (PFN mappings
 * cannot be COWed) */
130 spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
132 if (!(vma->vm_flags & VM_SHARED))
135 vma->vm_flags |= VM_IO | VM_PFNMAP;
136 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
139 vma->vm_ops = &spufs_mem_mmap_vmops;
143 static const struct file_operations spufs_mem_fops = {
144 .open = spufs_mem_open,
145 .read = spufs_mem_read,
146 .write = spufs_mem_write,
147 .llseek = generic_file_llseek,
148 .mmap = spufs_mem_mmap,
/* shared nopfn handler for all problem-state mappings: bounds-check the
 * fault offset against ps_size, make the context runnable (so ctx->spu is
 * valid), then insert the PFN of the problem-state area at ps_offs */
151 static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
152 unsigned long address,
153 unsigned long ps_offs,
154 unsigned long ps_size)
156 struct spu_context *ctx = vma->vm_file->private_data;
157 unsigned long area, offset = address - vma->vm_start;
160 offset += vma->vm_pgoff << PAGE_SHIFT;
161 if (offset >= ps_size)
164 /* error here usually means a signal.. we might want to test
165 * the error code more precisely though
167 ret = spu_acquire_runnable(ctx);
169 return NOPFN_REFAULT;
171 area = ctx->spu->problem_phys + ps_offs;
172 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
/* NOTE(review): a spu_release() presumably sits on an elided line before
 * this return — confirm against the full source */
175 return NOPFN_REFAULT;
/* control-area fault handler: 4k window at problem-state offset 0x4000 */
179 static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
180 unsigned long address)
182 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
185 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
186 .nopfn = spufs_cntl_mmap_nopfn,
190 * mmap support for problem state control area [0x4000 - 0x4fff].
192 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
194 if (!(vma->vm_flags & VM_SHARED))
197 vma->vm_flags |= VM_IO | VM_PFNMAP;
198 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
199 | _PAGE_NO_CACHE | _PAGE_GUARDED);
201 vma->vm_ops = &spufs_cntl_mmap_vmops;
/* with 64k pages the 4k control window cannot be mmapped on its own */
204 #else /* SPUFS_MMAP_4K */
205 #define spufs_cntl_mmap NULL
206 #endif /* !SPUFS_MMAP_4K */
/* simple_attr getter for "cntl": reads the SPU status register through the
 * backend ops (acquire/release lines elided) */
208 static u64 spufs_cntl_get(void *data)
210 struct spu_context *ctx = data;
214 val = ctx->ops->status_read(ctx);
/* simple_attr setter for "cntl": writes the run-control register */
220 static void spufs_cntl_set(void *data, u64 val)
222 struct spu_context *ctx = data;
225 ctx->ops->runcntl_write(ctx, val);
/* open: publish the mapping for the cntl area and hook up the
 * simple_attr get/set pair with a hex format string */
229 static int spufs_cntl_open(struct inode *inode, struct file *file)
231 struct spufs_inode_info *i = SPUFS_I(inode);
232 struct spu_context *ctx = i->i_ctx;
234 file->private_data = ctx;
235 file->f_mapping = inode->i_mapping;
236 ctx->cntl = inode->i_mapping;
237 return simple_attr_open(inode, file, spufs_cntl_get,
238 spufs_cntl_set, "0x%08lx");
241 static const struct file_operations spufs_cntl_fops = {
242 .open = spufs_cntl_open,
243 .release = simple_attr_close,
244 .read = simple_attr_read,
245 .write = simple_attr_write,
246 .mmap = spufs_cntl_mmap,
/* open for register-image files: only the context pointer is needed */
250 spufs_regs_open(struct inode *inode, struct file *file)
252 struct spufs_inode_info *i = SPUFS_I(inode);
253 file->private_data = i->i_ctx;
/* read the saved GPR array out of the local-store context save area;
 * caller must have the context in saved state */
258 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
259 size_t size, loff_t *pos)
261 struct spu_lscsa *lscsa = ctx->csa.lscsa;
262 return simple_read_from_buffer(buffer, size, pos,
263 lscsa->gprs, sizeof lscsa->gprs);
267 spufs_regs_read(struct file *file, char __user *buffer,
268 size_t size, loff_t *pos)
271 struct spu_context *ctx = file->private_data;
/* force the context into saved state so the CSA image is current */
273 spu_acquire_saved(ctx);
274 ret = __spufs_regs_read(ctx, buffer, size, pos);
/* write the saved GPR image; same *pos - size pattern as spufs_mem_write,
 * implying *pos is advanced on an elided line — TODO confirm */
280 spufs_regs_write(struct file *file, const char __user *buffer,
281 size_t size, loff_t *pos)
283 struct spu_context *ctx = file->private_data;
284 struct spu_lscsa *lscsa = ctx->csa.lscsa;
287 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
292 spu_acquire_saved(ctx);
294 ret = copy_from_user(lscsa->gprs + *pos - size,
295 buffer, size) ? -EFAULT : size;
301 static const struct file_operations spufs_regs_fops = {
302 .open = spufs_regs_open,
303 .read = spufs_regs_read,
304 .write = spufs_regs_write,
305 .llseek = generic_file_llseek,
/* fpcr file: same pattern as the GPR file, for the saved FPCR register */
309 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
310 size_t size, loff_t * pos)
312 struct spu_lscsa *lscsa = ctx->csa.lscsa;
313 return simple_read_from_buffer(buffer, size, pos,
314 &lscsa->fpcr, sizeof(lscsa->fpcr));
318 spufs_fpcr_read(struct file *file, char __user * buffer,
319 size_t size, loff_t * pos)
322 struct spu_context *ctx = file->private_data;
324 spu_acquire_saved(ctx);
325 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
331 spufs_fpcr_write(struct file *file, const char __user * buffer,
332 size_t size, loff_t * pos)
334 struct spu_context *ctx = file->private_data;
335 struct spu_lscsa *lscsa = ctx->csa.lscsa;
338 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
343 spu_acquire_saved(ctx);
345 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
346 buffer, size) ? -EFAULT : size;
/* note: fpcr reuses spufs_regs_open, which only sets private_data */
352 static const struct file_operations spufs_fpcr_fops = {
353 .open = spufs_regs_open,
354 .read = spufs_fpcr_read,
355 .write = spufs_fpcr_write,
356 .llseek = generic_file_llseek,
359 /* generic open function for all pipe-like files */
360 static int spufs_pipe_open(struct inode *inode, struct file *file)
362 struct spufs_inode_info *i = SPUFS_I(inode);
363 file->private_data = i->i_ctx;
365 return nonseekable_open(inode, file);
369 * Read as many bytes from the mailbox as possible, until
370 * one of the conditions becomes true:
372 * - no more data available in the mailbox
373 * - end of the user provided buffer
374 * - end of the mapped area
376 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
377 size_t len, loff_t *pos)
379 struct spu_context *ctx = file->private_data;
380 u32 mbox_data, __user *udata;
/* verify the whole buffer up front so __put_user can be used below */
386 if (!access_ok(VERIFY_WRITE, buf, len))
389 udata = (void __user *)buf;
/* drain mailbox words while at least 4 bytes of user buffer remain */
392 for (count = 0; (count + 4) <= len; count += 4, udata++) {
394 ret = ctx->ops->mbox_read(ctx, &mbox_data);
399 * at the end of the mapped area, we can fault
400 * but still need to return the data we have
401 * read successfully so far.
403 ret = __put_user(mbox_data, udata);
418 static const struct file_operations spufs_mbox_fops = {
419 .open = spufs_pipe_open,
420 .read = spufs_mbox_read,
/* "mbox_stat": expose the low byte of the mailbox status register */
423 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
424 size_t len, loff_t *pos)
426 struct spu_context *ctx = file->private_data;
434 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
438 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
444 static const struct file_operations spufs_mbox_stat_fops = {
445 .open = spufs_pipe_open,
446 .read = spufs_mbox_stat_read,
449 /* low-level ibox access function */
450 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
452 return ctx->ops->ibox_read(ctx, data);
455 static int spufs_ibox_fasync(int fd, struct file *file, int on)
457 struct spu_context *ctx = file->private_data;
459 return fasync_helper(fd, file, on, &ctx->ibox_fasync)
462 /* interrupt-level ibox callback function. */
463 void spufs_ibox_callback(struct spu *spu)
465 struct spu_context *ctx = spu->ctx;
/* wake sleeping readers and signal async waiters; runs in irq context,
 * so no locks/acquire here */
467 wake_up_all(&ctx->ibox_wq);
468 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
472 * Read as many bytes from the interrupt mailbox as possible, until
473 * one of the conditions becomes true:
475 * - no more data available in the mailbox
476 * - end of the user provided buffer
477 * - end of the mapped area
479 * If the file is opened without O_NONBLOCK, we wait here until
480 * any data is available, but return when we have been able to
483 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
484 size_t len, loff_t *pos)
486 struct spu_context *ctx = file->private_data;
487 u32 ibox_data, __user *udata;
493 if (!access_ok(VERIFY_WRITE, buf, len))
496 udata = (void __user *)buf;
500 /* wait only for the first element */
502 if (file->f_flags & O_NONBLOCK) {
503 if (!spu_ibox_read(ctx, &ibox_data))
/* blocking path: sleep on ibox_wq until one word arrives */
506 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
511 /* if we can't write at all, return -EFAULT */
512 count = __put_user(ibox_data, udata);
/* first word already consumed above, hence count starts at 4 */
516 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
518 ret = ctx->ops->ibox_read(ctx, &ibox_data);
522 * at the end of the mapped area, we can fault
523 * but still need to return the data we have
524 * read successfully so far.
526 ret = __put_user(ibox_data, udata);
537 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
539 struct spu_context *ctx = file->private_data;
542 poll_wait(file, &ctx->ibox_wq, wait);
545 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
551 static const struct file_operations spufs_ibox_fops = {
552 .open = spufs_pipe_open,
553 .read = spufs_ibox_read,
554 .poll = spufs_ibox_poll,
555 .fasync = spufs_ibox_fasync,
/* "ibox_stat": bits 16-23 of the mailbox status register */
558 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
559 size_t len, loff_t *pos)
561 struct spu_context *ctx = file->private_data;
568 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
571 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
577 static const struct file_operations spufs_ibox_stat_fops = {
578 .open = spufs_pipe_open,
579 .read = spufs_ibox_stat_read,
582 /* low-level mailbox write */
583 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
585 return ctx->ops->wbox_write(ctx, data);
588 static int spufs_wbox_fasync(int fd, struct file *file, int on)
590 struct spu_context *ctx = file->private_data;
593 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
598 /* interrupt-level wbox callback function. */
599 void spufs_wbox_callback(struct spu *spu)
601 struct spu_context *ctx = spu->ctx;
/* wake blocked writers when mailbox space frees up; irq context */
603 wake_up_all(&ctx->wbox_wq);
604 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
608 * Write as many bytes to the interrupt mailbox as possible, until
609 * one of the conditions becomes true:
611 * - the mailbox is full
612 * - end of the user provided buffer
613 * - end of the mapped area
615 * If the file is opened without O_NONBLOCK, we wait here until
616 * space is available, but return when we have been able to
619 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
620 size_t len, loff_t *pos)
622 struct spu_context *ctx = file->private_data;
623 u32 wbox_data, __user *udata;
629 udata = (void __user *)buf;
630 if (!access_ok(VERIFY_READ, buf, len))
/* fetch the first word before waiting so we never block on EFAULT */
633 if (__get_user(wbox_data, udata))
639 * make sure we can at least write one element, by waiting
640 * in case of !O_NONBLOCK
643 if (file->f_flags & O_NONBLOCK) {
644 if (!spu_wbox_write(ctx, wbox_data))
647 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
653 /* write as much as possible */
654 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
656 ret = __get_user(wbox_data, udata);
660 ret = spu_wbox_write(ctx, wbox_data);
670 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
672 struct spu_context *ctx = file->private_data;
675 poll_wait(file, &ctx->wbox_wq, wait);
678 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
684 static const struct file_operations spufs_wbox_fops = {
685 .open = spufs_pipe_open,
686 .write = spufs_wbox_write,
687 .poll = spufs_wbox_poll,
688 .fasync = spufs_wbox_fasync,
/* "wbox_stat": bits 8-15 of the mailbox status register */
691 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
692 size_t len, loff_t *pos)
694 struct spu_context *ctx = file->private_data;
701 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
704 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
710 static const struct file_operations spufs_wbox_stat_fops = {
711 .open = spufs_pipe_open,
712 .read = spufs_wbox_stat_read,
/* open "signal1": publish mapping and context, like the mem/cntl opens */
715 static int spufs_signal1_open(struct inode *inode, struct file *file)
717 struct spufs_inode_info *i = SPUFS_I(inode);
718 struct spu_context *ctx = i->i_ctx;
719 file->private_data = ctx;
720 file->f_mapping = inode->i_mapping;
721 ctx->signal1 = inode->i_mapping;
722 return nonseekable_open(inode, file);
/* read the saved signal-notification-1 value out of the CSA: channel 3
 * data, valid only if the saved channel count is non-zero */
725 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
726 size_t len, loff_t *pos)
734 if (ctx->csa.spu_chnlcnt_RW[3]) {
735 data = ctx->csa.spu_chnldata_RW[3];
742 if (copy_to_user(buf, &data, 4))
749 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
750 size_t len, loff_t *pos)
753 struct spu_context *ctx = file->private_data;
755 spu_acquire_saved(ctx);
756 ret = __spufs_signal1_read(ctx, buf, len, pos);
/* write 4 bytes to the signal-1 notification register via backend ops */
762 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
763 size_t len, loff_t *pos)
765 struct spu_context *ctx;
768 ctx = file->private_data;
773 if (copy_from_user(&data, buf, 4))
777 ctx->ops->signal1_write(ctx, data);
/* fault handler: with 4k pages only the 4k signal-1 window is mapped;
 * with 64k pages one 64k page covers both signal areas */
783 static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
784 unsigned long address)
786 #if PAGE_SIZE == 0x1000
787 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
788 #elif PAGE_SIZE == 0x10000
789 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
790 * signal 1 and 2 area
792 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
794 #error unsupported page size
798 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
799 .nopfn = spufs_signal1_mmap_nopfn,
802 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
804 if (!(vma->vm_flags & VM_SHARED))
807 vma->vm_flags |= VM_IO | VM_PFNMAP;
808 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
809 | _PAGE_NO_CACHE | _PAGE_GUARDED);
811 vma->vm_ops = &spufs_signal1_mmap_vmops;
815 static const struct file_operations spufs_signal1_fops = {
816 .open = spufs_signal1_open,
817 .read = spufs_signal1_read,
818 .write = spufs_signal1_write,
819 .mmap = spufs_signal1_mmap,
/* "signal2": mirror image of the signal1 file, using channel 4 and the
 * second notification register */
822 static int spufs_signal2_open(struct inode *inode, struct file *file)
824 struct spufs_inode_info *i = SPUFS_I(inode);
825 struct spu_context *ctx = i->i_ctx;
826 file->private_data = ctx;
827 file->f_mapping = inode->i_mapping;
828 ctx->signal2 = inode->i_mapping;
829 return nonseekable_open(inode, file);
832 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
833 size_t len, loff_t *pos)
841 if (ctx->csa.spu_chnlcnt_RW[4]) {
842 data = ctx->csa.spu_chnldata_RW[4];
849 if (copy_to_user(buf, &data, 4))
856 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
857 size_t len, loff_t *pos)
859 struct spu_context *ctx = file->private_data;
862 spu_acquire_saved(ctx);
863 ret = __spufs_signal2_read(ctx, buf, len, pos);
869 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
870 size_t len, loff_t *pos)
872 struct spu_context *ctx;
875 ctx = file->private_data;
880 if (copy_from_user(&data, buf, 4))
884 ctx->ops->signal2_write(ctx, data);
/* signal-2 window sits at 0x1c000 with 4k pages; with 64k pages the
 * whole combined signal area is mapped, same as signal1 */
891 static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
892 unsigned long address)
894 #if PAGE_SIZE == 0x1000
895 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
896 #elif PAGE_SIZE == 0x10000
897 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
898 * signal 1 and 2 area
900 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
902 #error unsupported page size
906 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
907 .nopfn = spufs_signal2_mmap_nopfn,
910 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
912 if (!(vma->vm_flags & VM_SHARED))
915 vma->vm_flags |= VM_IO | VM_PFNMAP;
916 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
917 | _PAGE_NO_CACHE | _PAGE_GUARDED);
919 vma->vm_ops = &spufs_signal2_mmap_vmops;
922 #else /* SPUFS_MMAP_4K */
923 #define spufs_signal2_mmap NULL
924 #endif /* !SPUFS_MMAP_4K */
926 static const struct file_operations spufs_signal2_fops = {
927 .open = spufs_signal2_open,
928 .read = spufs_signal2_read,
929 .write = spufs_signal2_write,
930 .mmap = spufs_signal2_mmap,
/* simple_attr pairs exposing the signal-notification type (overwrite vs.
 * logical-OR mode) for both signal registers */
933 static void spufs_signal1_type_set(void *data, u64 val)
935 struct spu_context *ctx = data;
938 ctx->ops->signal1_type_set(ctx, val);
942 static u64 __spufs_signal1_type_get(void *data)
944 struct spu_context *ctx = data;
945 return ctx->ops->signal1_type_get(ctx);
948 static u64 spufs_signal1_type_get(void *data)
950 struct spu_context *ctx = data;
/* acquire/release around the __ helper are on elided lines */
954 ret = __spufs_signal1_type_get(data);
959 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
960 spufs_signal1_type_set, "%llu");
962 static void spufs_signal2_type_set(void *data, u64 val)
964 struct spu_context *ctx = data;
967 ctx->ops->signal2_type_set(ctx, val);
971 static u64 __spufs_signal2_type_get(void *data)
973 struct spu_context *ctx = data;
974 return ctx->ops->signal2_type_get(ctx);
977 static u64 spufs_signal2_type_get(void *data)
979 struct spu_context *ctx = data;
983 ret = __spufs_signal2_type_get(data);
988 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
989 spufs_signal2_type_set, "%llu");
/* "mss": mmap-only file for the first 4k of the problem state area */
992 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
993 unsigned long address)
995 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
998 static struct vm_operations_struct spufs_mss_mmap_vmops = {
999 .nopfn = spufs_mss_mmap_nopfn,
1003 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1005 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1007 if (!(vma->vm_flags & VM_SHARED))
1010 vma->vm_flags |= VM_IO | VM_PFNMAP;
1011 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1012 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1014 vma->vm_ops = &spufs_mss_mmap_vmops;
1017 #else /* SPUFS_MMAP_4K */
1018 #define spufs_mss_mmap NULL
1019 #endif /* !SPUFS_MMAP_4K */
1021 static int spufs_mss_open(struct inode *inode, struct file *file)
1023 struct spufs_inode_info *i = SPUFS_I(inode);
1025 file->private_data = i->i_ctx;
1026 return nonseekable_open(inode, file);
1029 static const struct file_operations spufs_mss_fops = {
1030 .open = spufs_mss_open,
1031 .mmap = spufs_mss_mmap,
/* "psmap": mmap-only file covering the full 128k problem state area */
1034 static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1035 unsigned long address)
1037 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
1040 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1041 .nopfn = spufs_psmap_mmap_nopfn,
1045 * mmap support for full problem state area [0x00000 - 0x1ffff].
1047 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1049 if (!(vma->vm_flags & VM_SHARED))
1052 vma->vm_flags |= VM_IO | VM_PFNMAP;
1053 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1054 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1056 vma->vm_ops = &spufs_psmap_mmap_vmops;
1060 static int spufs_psmap_open(struct inode *inode, struct file *file)
1062 struct spufs_inode_info *i = SPUFS_I(inode);
1064 file->private_data = i->i_ctx;
1065 return nonseekable_open(inode, file);
1068 static const struct file_operations spufs_psmap_fops = {
1069 .open = spufs_psmap_open,
1070 .mmap = spufs_psmap_mmap,
/* "mfc": user-space DMA queue interface; 4k window at offset 0x3000 */
1075 static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1076 unsigned long address)
1078 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
1081 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1082 .nopfn = spufs_mfc_mmap_nopfn,
1086 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1088 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1090 if (!(vma->vm_flags & VM_SHARED))
1093 vma->vm_flags |= VM_IO | VM_PFNMAP;
1094 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1095 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1097 vma->vm_ops = &spufs_mfc_mmap_vmops;
1100 #else /* SPUFS_MMAP_4K */
1101 #define spufs_mfc_mmap NULL
1102 #endif /* !SPUFS_MMAP_4K */
1104 static int spufs_mfc_open(struct inode *inode, struct file *file)
1106 struct spufs_inode_info *i = SPUFS_I(inode);
1107 struct spu_context *ctx = i->i_ctx;
1109 /* we don't want to deal with DMA into other processes */
1110 if (ctx->owner != current->mm)
/* only a single open of this file is allowed per context */
1113 if (atomic_read(&inode->i_count) != 1)
1116 file->private_data = ctx;
1117 return nonseekable_open(inode, file);
1120 /* interrupt-level mfc callback function. */
1121 void spufs_mfc_callback(struct spu *spu)
1123 struct spu_context *ctx = spu->ctx;
1125 wake_up_all(&ctx->mfc_wq);
1127 pr_debug("%s %s\n", __FUNCTION__, spu->name);
1128 if (ctx->mfc_fasync) {
1129 u32 free_elements, tagstatus;
1132 /* no need for spu_acquire in interrupt context */
1133 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1134 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
/* mask accumulation (POLLOUT/POLLIN) happens on elided lines */
1137 if (free_elements & 0xffff)
1139 if (tagstatus & ctx->tagwait)
1142 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1146 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1148 /* See if there is one tag group is complete */
1149 /* FIXME we need locking around tagwait */
1150 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1151 ctx->tagwait &= ~*status;
1155 /* enable interrupt waiting for any tag group,
1156 may silently fail if interrupts are already enabled */
1157 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
/* read: return a 4-byte mask of completed tag groups, blocking unless
 * O_NONBLOCK is set */
1161 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1162 size_t size, loff_t *pos)
1164 struct spu_context *ctx = file->private_data;
1172 if (file->f_flags & O_NONBLOCK) {
1173 status = ctx->ops->read_mfc_tagstatus(ctx);
1174 if (!(status & ctx->tagwait))
1177 ctx->tagwait &= ~status;
1179 ret = spufs_wait(ctx->mfc_wq,
1180 spufs_read_mfc_tagstatus(ctx, &status));
1188 if (copy_to_user(buffer, &status, 4))
/* validate a user-supplied MFC DMA command before queueing it */
1195 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1197 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1198 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1209 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
/* local-store and effective addresses must share 16-byte alignment */
1213 if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
1214 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
/* size-dependent alignment checks (cases elided in this view) */
1219 switch (cmd->size & 0xf) {
1240 pr_debug("invalid DMA alignment %x for size %x\n",
1241 cmd->lsa & 0xf, cmd->size);
1245 if (cmd->size > 16 * 1024) {
1246 pr_debug("invalid DMA size %x\n", cmd->size);
1250 if (cmd->tag & 0xfff0) {
1251 /* we reserve the higher tag numbers for kernel use */
1252 pr_debug("invalid DMA tag\n");
1257 /* not supported in this version */
1258 pr_debug("invalid DMA class\n");
/* try to queue an MFC command; on EAGAIN enable the completion interrupt
 * and retry once so the caller can sleep for queue space */
1265 static int spu_send_mfc_command(struct spu_context *ctx,
1266 struct mfc_dma_command cmd,
1269 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1270 if (*error == -EAGAIN) {
1271 /* wait for any tag group to complete
1272 so we have space for the new command */
1273 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1274 /* try again, because the queue might be
1276 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1277 if (*error == -EAGAIN)
/* write: accept exactly one struct mfc_dma_command, validate and queue
 * it, then record its tag in ctx->tagwait */
1283 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1284 size_t size, loff_t *pos)
1286 struct spu_context *ctx = file->private_data;
1287 struct mfc_dma_command cmd;
1290 if (size != sizeof cmd)
1294 if (copy_from_user(&cmd, buffer, sizeof cmd))
1297 ret = spufs_check_valid_dma(&cmd);
/* NOTE(review): return value of spu_acquire_runnable is ignored here,
 * unlike in spufs_ps_nopfn — possibly intentional, worth confirming */
1301 spu_acquire_runnable(ctx);
1302 if (file->f_flags & O_NONBLOCK) {
1303 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1306 ret = spufs_wait(ctx->mfc_wq,
1307 spu_send_mfc_command(ctx, cmd, &status));
1316 ctx->tagwait |= 1 << cmd.tag;
1323 static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
1325 struct spu_context *ctx = file->private_data;
1326 u32 free_elements, tagstatus;
/* mode 2: enable interrupt on completion of *all* waited-for tags */
1330 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1331 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1332 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1335 poll_wait(file, &ctx->mfc_wq, wait);
1338 if (free_elements & 0xffff)
1339 mask |= POLLOUT | POLLWRNORM;
1340 if (tagstatus & ctx->tagwait)
1341 mask |= POLLIN | POLLRDNORM;
1343 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1344 free_elements, tagstatus, ctx->tagwait);
/* flush: wait until every outstanding tag group has completed */
1349 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1351 struct spu_context *ctx = file->private_data;
1356 /* this currently hangs */
1357 ret = spufs_wait(ctx->mfc_wq,
1358 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1361 ret = spufs_wait(ctx->mfc_wq,
1362 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
/* fsync simply reuses the flush path */
1372 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1375 return spufs_mfc_flush(file, NULL);
1378 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1380 struct spu_context *ctx = file->private_data;
1382 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1385 static const struct file_operations spufs_mfc_fops = {
1386 .open = spufs_mfc_open,
1387 .read = spufs_mfc_read,
1388 .write = spufs_mfc_write,
1389 .poll = spufs_mfc_poll,
1390 .flush = spufs_mfc_flush,
1391 .fsync = spufs_mfc_fsync,
1392 .fasync = spufs_mfc_fasync,
1393 .mmap = spufs_mfc_mmap,
/* "npc": next program counter, read/written live through backend ops */
1396 static void spufs_npc_set(void *data, u64 val)
1398 struct spu_context *ctx = data;
1400 ctx->ops->npc_write(ctx, val);
1404 static u64 spufs_npc_get(void *data)
1406 struct spu_context *ctx = data;
1409 ret = ctx->ops->npc_read(ctx);
1413 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
/* "decr": SPU decrementer, accessed via the saved CSA image (context is
 * forced into saved state first) */
1416 static void spufs_decr_set(void *data, u64 val)
1418 struct spu_context *ctx = data;
1419 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1420 spu_acquire_saved(ctx);
1421 lscsa->decr.slot[0] = (u32) val;
1425 static u64 __spufs_decr_get(void *data)
1427 struct spu_context *ctx = data;
1428 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1429 return lscsa->decr.slot[0];
1432 static u64 spufs_decr_get(void *data)
1434 struct spu_context *ctx = data;
1436 spu_acquire_saved(ctx);
1437 ret = __spufs_decr_get(data);
1441 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
/* "decr_status": saved decrementer-status slot, same pattern as decr */
1444 static void spufs_decr_status_set(void *data, u64 val)
1446 struct spu_context *ctx = data;
1447 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1448 spu_acquire_saved(ctx);
1449 lscsa->decr_status.slot[0] = (u32) val;
1453 static u64 __spufs_decr_status_get(void *data)
1455 struct spu_context *ctx = data;
1456 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1457 return lscsa->decr_status.slot[0];
1460 static u64 spufs_decr_status_get(void *data)
1462 struct spu_context *ctx = data;
1464 spu_acquire_saved(ctx);
1465 ret = __spufs_decr_status_get(data);
1469 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1470 spufs_decr_status_set, "0x%llx\n")
/* "event_mask": saved SPU event mask slot */
1472 static void spufs_event_mask_set(void *data, u64 val)
1474 struct spu_context *ctx = data;
1475 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1476 spu_acquire_saved(ctx);
1477 lscsa->event_mask.slot[0] = (u32) val;
1481 static u64 __spufs_event_mask_get(void *data)
1483 struct spu_context *ctx = data;
1484 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1485 return lscsa->event_mask.slot[0];
1488 static u64 spufs_event_mask_get(void *data)
1490 struct spu_context *ctx = data;
1492 spu_acquire_saved(ctx);
1493 ret = __spufs_event_mask_get(data);
1497 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1498 spufs_event_mask_set, "0x%llx\n")
/* "event_status": channel 0 data from the CSA, valid only when the saved
 * channel count is non-zero (the zero-count branch is elided) */
1500 static u64 __spufs_event_status_get(void *data)
1502 struct spu_context *ctx = data;
1503 struct spu_state *state = &ctx->csa;
1505 stat = state->spu_chnlcnt_RW[0];
1507 return state->spu_chnldata_RW[0];
1511 static u64 spufs_event_status_get(void *data)
1513 struct spu_context *ctx = data;
1516 spu_acquire_saved(ctx);
1517 ret = __spufs_event_status_get(data);
1521 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
/* "srr0": saved SRR0 slot in the local-store CSA */
1524 static void spufs_srr0_set(void *data, u64 val)
1526 struct spu_context *ctx = data;
1527 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1528 spu_acquire_saved(ctx);
1529 lscsa->srr0.slot[0] = (u32) val;
1533 static u64 spufs_srr0_get(void *data)
1535 struct spu_context *ctx = data;
1536 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1538 spu_acquire_saved(ctx);
1539 ret = lscsa->srr0.slot[0];
1543 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
/* "phys-id": number of the physical SPU the context runs on, or -1 when
 * the context is not currently loaded */
1546 static u64 spufs_id_get(void *data)
1548 struct spu_context *ctx = data;
1552 if (ctx->state == SPU_STATE_RUNNABLE)
1553 num = ctx->spu->number;
1555 num = (unsigned int)-1;
1560 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
/* "object-id": opaque value user space can attach to a context (used by
 * debuggers/tools); plain load/store of ctx->object_id */
1562 static u64 __spufs_object_id_get(void *data)
1564 struct spu_context *ctx = data;
1565 return ctx->object_id;
1568 static u64 spufs_object_id_get(void *data)
1570 /* FIXME: Should there really be no locking here? */
1571 return __spufs_object_id_get(data);
1574 static void spufs_object_id_set(void *data, u64 id)
1576 struct spu_context *ctx = data;
1577 ctx->object_id = id;
1580 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1581 spufs_object_id_set, "0x%llx\n");
/* "lslr": local-store limit register, read from the saved priv2 state */
1583 static u64 __spufs_lslr_get(void *data)
1585 struct spu_context *ctx = data;
1586 return ctx->csa.priv2.spu_lslr_RW;
1589 static u64 spufs_lslr_get(void *data)
1591 struct spu_context *ctx = data;
1594 spu_acquire_saved(ctx);
1595 ret = __spufs_lslr_get(data);
1600 DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
/* generic open for the *_info files: context pointer only */
1602 static int spufs_info_open(struct inode *inode, struct file *file)
1604 struct spufs_inode_info *i = SPUFS_I(inode);
1605 struct spu_context *ctx = i->i_ctx;
1606 file->private_data = ctx;
/* "mbox_info": saved PU mailbox word, valid when the low status byte is
 * non-zero; caller holds acquire_saved + register_lock */
1610 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1611 char __user *buf, size_t len, loff_t *pos)
1616 mbox_stat = ctx->csa.prob.mb_stat_R;
1617 if (mbox_stat & 0x0000ff) {
1618 data = ctx->csa.prob.pu_mb_R;
1621 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1624 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1625 size_t len, loff_t *pos)
1628 struct spu_context *ctx = file->private_data;
1630 if (!access_ok(VERIFY_WRITE, buf, len))
1633 spu_acquire_saved(ctx);
1634 spin_lock(&ctx->csa.register_lock);
1635 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1636 spin_unlock(&ctx->csa.register_lock);
1642 static const struct file_operations spufs_mbox_info_fops = {
1643 .open = spufs_info_open,
1644 .read = spufs_mbox_info_read,
1645 .llseek = generic_file_llseek,
1648 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1649 char __user *buf, size_t len, loff_t *pos)
1654 ibox_stat = ctx->csa.prob.mb_stat_R;
1655 if (ibox_stat & 0xff0000) {
1656 data = ctx->csa.priv2.puint_mb_R;
1659 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1662 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1663 size_t len, loff_t *pos)
1665 struct spu_context *ctx = file->private_data;
1668 if (!access_ok(VERIFY_WRITE, buf, len))
1671 spu_acquire_saved(ctx);
1672 spin_lock(&ctx->csa.register_lock);
1673 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1674 spin_unlock(&ctx->csa.register_lock);
1680 static const struct file_operations spufs_ibox_info_fops = {
1681 .open = spufs_info_open,
1682 .read = spufs_ibox_info_read,
1683 .llseek = generic_file_llseek,
1686 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1687 char __user *buf, size_t len, loff_t *pos)
1693 wbox_stat = ctx->csa.prob.mb_stat_R;
1694 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1695 for (i = 0; i < cnt; i++) {
1696 data[i] = ctx->csa.spu_mailbox_data[i];
1699 return simple_read_from_buffer(buf, len, pos, &data,
1703 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1704 size_t len, loff_t *pos)
1706 struct spu_context *ctx = file->private_data;
1709 if (!access_ok(VERIFY_WRITE, buf, len))
1712 spu_acquire_saved(ctx);
1713 spin_lock(&ctx->csa.register_lock);
1714 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1715 spin_unlock(&ctx->csa.register_lock);
1721 static const struct file_operations spufs_wbox_info_fops = {
1722 .open = spufs_info_open,
1723 .read = spufs_wbox_info_read,
1724 .llseek = generic_file_llseek,
1727 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1728 char __user *buf, size_t len, loff_t *pos)
1730 struct spu_dma_info info;
1731 struct mfc_cq_sr *qp, *spuqp;
1734 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1735 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1736 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1737 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1738 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1739 for (i = 0; i < 16; i++) {
1740 qp = &info.dma_info_command_data[i];
1741 spuqp = &ctx->csa.priv2.spuq[i];
1743 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1744 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1745 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1746 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1749 return simple_read_from_buffer(buf, len, pos, &info,
1753 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1754 size_t len, loff_t *pos)
1756 struct spu_context *ctx = file->private_data;
1759 if (!access_ok(VERIFY_WRITE, buf, len))
1762 spu_acquire_saved(ctx);
1763 spin_lock(&ctx->csa.register_lock);
1764 ret = __spufs_dma_info_read(ctx, buf, len, pos);
1765 spin_unlock(&ctx->csa.register_lock);
1771 static const struct file_operations spufs_dma_info_fops = {
1772 .open = spufs_info_open,
1773 .read = spufs_dma_info_read,
1776 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1777 char __user *buf, size_t len, loff_t *pos)
1779 struct spu_proxydma_info info;
1780 struct mfc_cq_sr *qp, *puqp;
1781 int ret = sizeof info;
1787 if (!access_ok(VERIFY_WRITE, buf, len))
1790 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1791 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1792 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1793 for (i = 0; i < 8; i++) {
1794 qp = &info.proxydma_info_command_data[i];
1795 puqp = &ctx->csa.priv2.puq[i];
1797 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1798 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1799 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1800 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1803 return simple_read_from_buffer(buf, len, pos, &info,
1807 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1808 size_t len, loff_t *pos)
1810 struct spu_context *ctx = file->private_data;
1813 spu_acquire_saved(ctx);
1814 spin_lock(&ctx->csa.register_lock);
1815 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
1816 spin_unlock(&ctx->csa.register_lock);
1822 static const struct file_operations spufs_proxydma_info_fops = {
1823 .open = spufs_info_open,
1824 .read = spufs_proxydma_info_read,
1827 struct tree_descr spufs_dir_contents[] = {
1828 { "mem", &spufs_mem_fops, 0666, },
1829 { "regs", &spufs_regs_fops, 0666, },
1830 { "mbox", &spufs_mbox_fops, 0444, },
1831 { "ibox", &spufs_ibox_fops, 0444, },
1832 { "wbox", &spufs_wbox_fops, 0222, },
1833 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1834 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1835 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1836 { "signal1", &spufs_signal1_fops, 0666, },
1837 { "signal2", &spufs_signal2_fops, 0666, },
1838 { "signal1_type", &spufs_signal1_type, 0666, },
1839 { "signal2_type", &spufs_signal2_type, 0666, },
1840 { "cntl", &spufs_cntl_fops, 0666, },
1841 { "fpcr", &spufs_fpcr_fops, 0666, },
1842 { "lslr", &spufs_lslr_ops, 0444, },
1843 { "mfc", &spufs_mfc_fops, 0666, },
1844 { "mss", &spufs_mss_fops, 0666, },
1845 { "npc", &spufs_npc_ops, 0666, },
1846 { "srr0", &spufs_srr0_ops, 0666, },
1847 { "decr", &spufs_decr_ops, 0666, },
1848 { "decr_status", &spufs_decr_status_ops, 0666, },
1849 { "event_mask", &spufs_event_mask_ops, 0666, },
1850 { "event_status", &spufs_event_status_ops, 0444, },
1851 { "psmap", &spufs_psmap_fops, 0666, },
1852 { "phys-id", &spufs_id_ops, 0666, },
1853 { "object-id", &spufs_object_id_ops, 0666, },
1854 { "mbox_info", &spufs_mbox_info_fops, 0444, },
1855 { "ibox_info", &spufs_ibox_info_fops, 0444, },
1856 { "wbox_info", &spufs_wbox_info_fops, 0444, },
1857 { "dma_info", &spufs_dma_info_fops, 0444, },
1858 { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
1862 struct tree_descr spufs_dir_nosched_contents[] = {
1863 { "mem", &spufs_mem_fops, 0666, },
1864 { "mbox", &spufs_mbox_fops, 0444, },
1865 { "ibox", &spufs_ibox_fops, 0444, },
1866 { "wbox", &spufs_wbox_fops, 0222, },
1867 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1868 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1869 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1870 { "signal1", &spufs_signal1_fops, 0666, },
1871 { "signal2", &spufs_signal2_fops, 0666, },
1872 { "signal1_type", &spufs_signal1_type, 0666, },
1873 { "signal2_type", &spufs_signal2_type, 0666, },
1874 { "mss", &spufs_mss_fops, 0666, },
1875 { "mfc", &spufs_mfc_fops, 0666, },
1876 { "cntl", &spufs_cntl_fops, 0666, },
1877 { "npc", &spufs_npc_ops, 0666, },
1878 { "psmap", &spufs_psmap_fops, 0666, },
1879 { "phys-id", &spufs_id_ops, 0666, },
1880 { "object-id", &spufs_object_id_ops, 0666, },
1884 struct spufs_coredump_reader spufs_coredump_read[] = {
1885 { "regs", __spufs_regs_read, NULL, 128 * 16 },
1886 { "fpcr", __spufs_fpcr_read, NULL, 16 },
1887 { "lslr", NULL, __spufs_lslr_get, 11 },
1888 { "decr", NULL, __spufs_decr_get, 11 },
1889 { "decr_status", NULL, __spufs_decr_status_get, 11 },
1890 { "mem", __spufs_mem_read, NULL, 256 * 1024, },
1891 { "signal1", __spufs_signal1_read, NULL, 4 },
1892 { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
1893 { "signal2", __spufs_signal2_read, NULL, 4 },
1894 { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
1895 { "event_mask", NULL, __spufs_event_mask_get, 8 },
1896 { "event_status", NULL, __spufs_event_status_get, 8 },
1897 { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
1898 { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
1899 { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
1900 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
1901 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
1902 { "object-id", NULL, __spufs_object_id_get, 19 },
/* Number of real entries in spufs_coredump_read (minus the terminator). */
int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;