2 * SPU file system -- file contents
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
33 #include <asm/semaphore.h>
35 #include <asm/spu_info.h>
36 #include <asm/uaccess.h>
40 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/*
 * Local-store ("mem") file: open, read and write entry points.
 * NOTE(review): this chunk is sparsely sampled (original line numbers are
 * embedded, interior lines missing) -- comments only, no code changes.
 */
43 spufs_mem_open(struct inode *inode, struct file *file)
45 struct spufs_inode_info *i = SPUFS_I(inode);
46 struct spu_context *ctx = i->i_ctx;
/* stash the context for the other fops and remember the mapping */
47 file->private_data = ctx;
48 ctx->local_store = inode->i_mapping;
/* copy out of the current local store; get_ls() picks saved vs. live copy */
54 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
55 size_t size, loff_t *pos)
57 char *local_store = ctx->ops->get_ls(ctx);
58 return simple_read_from_buffer(buffer, size, pos, local_store,
63 spufs_mem_read(struct file *file, char __user *buffer,
64 size_t size, loff_t *pos)
67 struct spu_context *ctx = file->private_data;
70 ret = __spufs_mem_read(ctx, buffer, size, pos);
76 spufs_mem_write(struct file *file, const char __user *buffer,
77 size_t size, loff_t *pos)
79 struct spu_context *ctx = file->private_data;
/* clamp the write so it does not run past the end of local store */
83 size = min_t(ssize_t, LS_SIZE - *pos, size);
90 local_store = ctx->ops->get_ls(ctx);
/* *pos has presumably already been advanced by 'size' here, hence the
 * '*pos - size' base -- TODO confirm against the elided lines */
91 ret = copy_from_user(local_store + *pos - size,
92 buffer, size) ? -EFAULT : size;
/*
 * Fault handler for mmap of local store: map either the vmalloc'ed
 * saved copy (context switched out) or the physical SPE local store
 * (context running), depending on ctx->state.
 */
98 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
99 unsigned long address)
101 struct spu_context *ctx = vma->vm_file->private_data;
102 unsigned long pfn, offset = address - vma->vm_start;
104 offset += vma->vm_pgoff << PAGE_SHIFT;
108 if (ctx->state == SPU_STATE_SAVED) {
109 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
/* saved state lives in vmalloc space -> translate to a pfn */
111 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
113 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
115 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
117 vm_insert_pfn(vma, address, pfn);
/* the pte is installed; tell the fault path to retry the access */
121 return NOPFN_REFAULT;
125 static struct vm_operations_struct spufs_mem_mmap_vmops = {
126 .nopfn = spufs_mem_mmap_nopfn,
130 spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
/* private mappings make no sense for device-like memory */
132 if (!(vma->vm_flags & VM_SHARED))
135 vma->vm_flags |= VM_IO | VM_PFNMAP;
136 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
139 vma->vm_ops = &spufs_mem_mmap_vmops;
143 static const struct file_operations spufs_mem_fops = {
144 .open = spufs_mem_open,
145 .read = spufs_mem_read,
146 .write = spufs_mem_write,
147 .llseek = generic_file_llseek,
148 .mmap = spufs_mem_mmap,
/*
 * Common fault handler for all problem-state mappings: make sure the
 * context is running on a physical SPE, then insert the pfn for the
 * requested problem-state sub-area (ps_offs/ps_size select it).
 */
151 static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
152 unsigned long address,
153 unsigned long ps_offs,
154 unsigned long ps_size)
156 struct spu_context *ctx = vma->vm_file->private_data;
157 unsigned long area, offset = address - vma->vm_start;
160 offset += vma->vm_pgoff << PAGE_SHIFT;
/* faulting beyond the sub-area is an error (handled in elided lines) */
161 if (offset >= ps_size)
164 /* error here usually means a signal.. we might want to test
165 * the error code more precisely though
167 ret = spu_acquire_runnable(ctx, 0);
169 return NOPFN_REFAULT;
/* ctx->spu is valid while we hold the context in runnable state */
171 area = ctx->spu->problem_phys + ps_offs;
172 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
175 return NOPFN_REFAULT;
/* cntl file: problem-state run-control area, plus get/set via simple_attr */
179 static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
180 unsigned long address)
182 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
185 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
186 .nopfn = spufs_cntl_mmap_nopfn,
190 * mmap support for problem state control area [0x4000 - 0x4fff].
192 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
194 if (!(vma->vm_flags & VM_SHARED))
197 vma->vm_flags |= VM_IO | VM_PFNMAP;
/* registers must be mapped uncached and guarded */
198 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
199 | _PAGE_NO_CACHE | _PAGE_GUARDED);
201 vma->vm_ops = &spufs_cntl_mmap_vmops;
204 #else /* SPUFS_MMAP_4K */
205 #define spufs_cntl_mmap NULL
206 #endif /* !SPUFS_MMAP_4K */
/* read-side of the cntl attribute: SPU status word */
208 static u64 spufs_cntl_get(void *data)
210 struct spu_context *ctx = data;
214 val = ctx->ops->status_read(ctx);
/* write-side: value goes straight into the run-control register */
220 static void spufs_cntl_set(void *data, u64 val)
222 struct spu_context *ctx = data;
225 ctx->ops->runcntl_write(ctx, val);
229 static int spufs_cntl_open(struct inode *inode, struct file *file)
231 struct spufs_inode_info *i = SPUFS_I(inode);
232 struct spu_context *ctx = i->i_ctx;
234 file->private_data = ctx;
235 ctx->cntl = inode->i_mapping;
237 return simple_attr_open(inode, file, spufs_cntl_get,
238 spufs_cntl_set, "0x%08lx");
241 static const struct file_operations spufs_cntl_fops = {
242 .open = spufs_cntl_open,
243 .release = simple_attr_close,
244 .read = simple_attr_read,
245 .write = simple_attr_write,
246 .mmap = spufs_cntl_mmap,
/* regs file: access to the 128 GPRs in the saved context image */
250 spufs_regs_open(struct inode *inode, struct file *file)
252 struct spufs_inode_info *i = SPUFS_I(inode);
253 file->private_data = i->i_ctx;
/* caller must have saved the context (see spu_acquire_saved below) */
258 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
259 size_t size, loff_t *pos)
261 struct spu_lscsa *lscsa = ctx->csa.lscsa;
262 return simple_read_from_buffer(buffer, size, pos,
263 lscsa->gprs, sizeof lscsa->gprs);
267 spufs_regs_read(struct file *file, char __user *buffer,
268 size_t size, loff_t *pos)
271 struct spu_context *ctx = file->private_data;
/* force the context into saved state so lscsa is current */
273 spu_acquire_saved(ctx);
274 ret = __spufs_regs_read(ctx, buffer, size, pos);
280 spufs_regs_write(struct file *file, const char __user *buffer,
281 size_t size, loff_t *pos)
283 struct spu_context *ctx = file->private_data;
284 struct spu_lscsa *lscsa = ctx->csa.lscsa;
/* clamp to the end of the GPR array */
287 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
292 spu_acquire_saved(ctx);
/* '*pos - size': *pos presumably already advanced -- TODO confirm */
294 ret = copy_from_user(lscsa->gprs + *pos - size,
295 buffer, size) ? -EFAULT : size;
301 static const struct file_operations spufs_regs_fops = {
302 .open = spufs_regs_open,
303 .read = spufs_regs_read,
304 .write = spufs_regs_write,
305 .llseek = generic_file_llseek,
/* fpcr file: floating-point control register from the saved image */
309 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
310 size_t size, loff_t * pos)
312 struct spu_lscsa *lscsa = ctx->csa.lscsa;
313 return simple_read_from_buffer(buffer, size, pos,
314 &lscsa->fpcr, sizeof(lscsa->fpcr));
318 spufs_fpcr_read(struct file *file, char __user * buffer,
319 size_t size, loff_t * pos)
322 struct spu_context *ctx = file->private_data;
/* save the context so the lscsa copy is up to date */
324 spu_acquire_saved(ctx);
325 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
331 spufs_fpcr_write(struct file *file, const char __user * buffer,
332 size_t size, loff_t * pos)
334 struct spu_context *ctx = file->private_data;
335 struct spu_lscsa *lscsa = ctx->csa.lscsa;
338 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
343 spu_acquire_saved(ctx);
345 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
346 buffer, size) ? -EFAULT : size;
/* note: shares spufs_regs_open, which only stashes the context */
352 static const struct file_operations spufs_fpcr_fops = {
353 .open = spufs_regs_open,
354 .read = spufs_fpcr_read,
355 .write = spufs_fpcr_write,
356 .llseek = generic_file_llseek,
359 /* generic open function for all pipe-like files */
360 static int spufs_pipe_open(struct inode *inode, struct file *file)
362 struct spufs_inode_info *i = SPUFS_I(inode);
363 file->private_data = i->i_ctx;
365 return nonseekable_open(inode, file);
369 * Read as many bytes from the mailbox as possible, until
370 * one of the conditions becomes true:
372 * - no more data available in the mailbox
373 * - end of the user provided buffer
374 * - end of the mapped area
376 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
377 size_t len, loff_t *pos)
379 struct spu_context *ctx = file->private_data;
380 u32 mbox_data, __user *udata;
/* validate the whole user buffer once, then use __put_user below */
386 if (!access_ok(VERIFY_WRITE, buf, len))
389 udata = (void __user *)buf;
/* mailbox entries are 32 bit; copy whole words only */
392 for (count = 0; (count + 4) <= len; count += 4, udata++) {
394 ret = ctx->ops->mbox_read(ctx, &mbox_data);
399 * at the end of the mapped area, we can fault
400 * but still need to return the data we have
401 * read successfully so far.
403 ret = __put_user(mbox_data, udata);
418 static const struct file_operations spufs_mbox_fops = {
419 .open = spufs_pipe_open,
420 .read = spufs_mbox_read,
/* mbox_stat: low byte of the mailbox status register */
423 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
424 size_t len, loff_t *pos)
426 struct spu_context *ctx = file->private_data;
434 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
438 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
444 static const struct file_operations spufs_mbox_stat_fops = {
445 .open = spufs_pipe_open,
446 .read = spufs_mbox_stat_read,
449 /* low-level ibox access function */
450 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
452 return ctx->ops->ibox_read(ctx, data);
455 static int spufs_ibox_fasync(int fd, struct file *file, int on)
457 struct spu_context *ctx = file->private_data;
459 return fasync_helper(fd, file, on, &ctx->ibox_fasync)
462 /* interrupt-level ibox callback function. */
463 void spufs_ibox_callback(struct spu *spu)
465 struct spu_context *ctx = spu->ctx;
/* wake blocked readers and signal async waiters */
467 wake_up_all(&ctx->ibox_wq);
468 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
472 * Read as many bytes from the interrupt mailbox as possible, until
473 * one of the conditions becomes true:
475 * - no more data available in the mailbox
476 * - end of the user provided buffer
477 * - end of the mapped area
479 * If the file is opened without O_NONBLOCK, we wait here until
480 * any data is available, but return when we have been able to
483 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
484 size_t len, loff_t *pos)
486 struct spu_context *ctx = file->private_data;
487 u32 ibox_data, __user *udata;
493 if (!access_ok(VERIFY_WRITE, buf, len))
496 udata = (void __user *)buf;
500 /* wait only for the first element */
502 if (file->f_flags & O_NONBLOCK) {
503 if (!spu_ibox_read(ctx, &ibox_data))
/* blocking case: sleep until at least one word is available */
506 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
511 /* if we can't write at all, return -EFAULT */
512 count = __put_user(ibox_data, udata);
/* first word already consumed; copy any further words opportunistically */
516 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
518 ret = ctx->ops->ibox_read(ctx, &ibox_data);
522 * at the end of the mapped area, we can fault
523 * but still need to return the data we have
524 * read successfully so far.
526 ret = __put_user(ibox_data, udata);
537 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
539 struct spu_context *ctx = file->private_data;
542 poll_wait(file, &ctx->ibox_wq, wait);
545 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
551 static const struct file_operations spufs_ibox_fops = {
552 .open = spufs_pipe_open,
553 .read = spufs_ibox_read,
554 .poll = spufs_ibox_poll,
555 .fasync = spufs_ibox_fasync,
/* ibox_stat: bits 16-23 of the mailbox status register */
558 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
559 size_t len, loff_t *pos)
561 struct spu_context *ctx = file->private_data;
568 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
571 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
577 static const struct file_operations spufs_ibox_stat_fops = {
578 .open = spufs_pipe_open,
579 .read = spufs_ibox_stat_read,
582 /* low-level mailbox write */
583 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
585 return ctx->ops->wbox_write(ctx, data);
588 static int spufs_wbox_fasync(int fd, struct file *file, int on)
590 struct spu_context *ctx = file->private_data;
593 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
598 /* interrupt-level wbox callback function. */
599 void spufs_wbox_callback(struct spu *spu)
601 struct spu_context *ctx = spu->ctx;
/* wake blocked writers and signal async waiters */
603 wake_up_all(&ctx->wbox_wq);
604 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
608 * Write as many bytes to the interrupt mailbox as possible, until
609 * one of the conditions becomes true:
611 * - the mailbox is full
612 * - end of the user provided buffer
613 * - end of the mapped area
615 * If the file is opened without O_NONBLOCK, we wait here until
616 * space is available, but return when we have been able to
619 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
620 size_t len, loff_t *pos)
622 struct spu_context *ctx = file->private_data;
623 u32 wbox_data, __user *udata;
629 udata = (void __user *)buf;
630 if (!access_ok(VERIFY_READ, buf, len))
/* fetch the first word up front so we fail early on bad buffers */
633 if (__get_user(wbox_data, udata))
639 * make sure we can at least write one element, by waiting
640 * in case of !O_NONBLOCK
643 if (file->f_flags & O_NONBLOCK) {
644 if (!spu_wbox_write(ctx, wbox_data))
647 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
653 /* write as much as possible */
654 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
656 ret = __get_user(wbox_data, udata);
660 ret = spu_wbox_write(ctx, wbox_data);
670 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
672 struct spu_context *ctx = file->private_data;
675 poll_wait(file, &ctx->wbox_wq, wait);
678 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
684 static const struct file_operations spufs_wbox_fops = {
685 .open = spufs_pipe_open,
686 .write = spufs_wbox_write,
687 .poll = spufs_wbox_poll,
688 .fasync = spufs_wbox_fasync,
/* wbox_stat: bits 8-15 of the mailbox status register */
691 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
692 size_t len, loff_t *pos)
694 struct spu_context *ctx = file->private_data;
701 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
704 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
710 static const struct file_operations spufs_wbox_stat_fops = {
711 .open = spufs_pipe_open,
712 .read = spufs_wbox_stat_read,
/* signal1 file: signal-notification register 1, read/write/mmap */
715 static int spufs_signal1_open(struct inode *inode, struct file *file)
717 struct spufs_inode_info *i = SPUFS_I(inode);
718 struct spu_context *ctx = i->i_ctx;
719 file->private_data = ctx;
720 ctx->signal1 = inode->i_mapping;
722 return nonseekable_open(inode, file);
/* read the pending signal1 value from the saved channel state;
 * channel 3 holds signal notification 1 -- see CBEA channel map */
725 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
726 size_t len, loff_t *pos)
734 if (ctx->csa.spu_chnlcnt_RW[3]) {
735 data = ctx->csa.spu_chnldata_RW[3];
742 if (copy_to_user(buf, &data, 4))
749 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
750 size_t len, loff_t *pos)
753 struct spu_context *ctx = file->private_data;
755 spu_acquire_saved(ctx);
756 ret = __spufs_signal1_read(ctx, buf, len, pos);
762 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
763 size_t len, loff_t *pos)
765 struct spu_context *ctx;
768 ctx = file->private_data;
773 if (copy_from_user(&data, buf, 4))
777 ctx->ops->signal1_write(ctx, data);
783 static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
784 unsigned long address)
786 #if PAGE_SIZE == 0x1000
787 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
788 #elif PAGE_SIZE == 0x10000
789 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
790 * signal 1 and 2 area
792 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
794 #error unsupported page size
798 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
799 .nopfn = spufs_signal1_mmap_nopfn,
802 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
804 if (!(vma->vm_flags & VM_SHARED))
807 vma->vm_flags |= VM_IO | VM_PFNMAP;
808 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
809 | _PAGE_NO_CACHE | _PAGE_GUARDED);
811 vma->vm_ops = &spufs_signal1_mmap_vmops;
815 static const struct file_operations spufs_signal1_fops = {
816 .open = spufs_signal1_open,
817 .read = spufs_signal1_read,
818 .write = spufs_signal1_write,
819 .mmap = spufs_signal1_mmap,
/* signal2 file: mirror of signal1 using channel 4 / the 0x1c000 area */
822 static int spufs_signal2_open(struct inode *inode, struct file *file)
824 struct spufs_inode_info *i = SPUFS_I(inode);
825 struct spu_context *ctx = i->i_ctx;
826 file->private_data = ctx;
827 ctx->signal2 = inode->i_mapping;
829 return nonseekable_open(inode, file);
/* channel 4 holds signal notification 2 in the saved channel state */
832 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
833 size_t len, loff_t *pos)
841 if (ctx->csa.spu_chnlcnt_RW[4]) {
842 data = ctx->csa.spu_chnldata_RW[4];
849 if (copy_to_user(buf, &data, 4))
856 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
857 size_t len, loff_t *pos)
859 struct spu_context *ctx = file->private_data;
862 spu_acquire_saved(ctx);
863 ret = __spufs_signal2_read(ctx, buf, len, pos);
869 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
870 size_t len, loff_t *pos)
872 struct spu_context *ctx;
875 ctx = file->private_data;
880 if (copy_from_user(&data, buf, 4))
884 ctx->ops->signal2_write(ctx, data);
891 static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
892 unsigned long address)
894 #if PAGE_SIZE == 0x1000
895 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
896 #elif PAGE_SIZE == 0x10000
897 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
898 * signal 1 and 2 area
900 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
902 #error unsupported page size
906 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
907 .nopfn = spufs_signal2_mmap_nopfn,
910 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
912 if (!(vma->vm_flags & VM_SHARED))
915 vma->vm_flags |= VM_IO | VM_PFNMAP;
916 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
917 | _PAGE_NO_CACHE | _PAGE_GUARDED);
919 vma->vm_ops = &spufs_signal2_mmap_vmops;
922 #else /* SPUFS_MMAP_4K */
923 #define spufs_signal2_mmap NULL
924 #endif /* !SPUFS_MMAP_4K */
926 static const struct file_operations spufs_signal2_fops = {
927 .open = spufs_signal2_open,
928 .read = spufs_signal2_read,
929 .write = spufs_signal2_write,
930 .mmap = spufs_signal2_mmap,
/* signal[12]_type attributes: logical-OR vs. overwrite mode selectors */
933 static void spufs_signal1_type_set(void *data, u64 val)
935 struct spu_context *ctx = data;
938 ctx->ops->signal1_type_set(ctx, val);
/* lock-free variant; caller is expected to hold the context */
942 static u64 __spufs_signal1_type_get(void *data)
944 struct spu_context *ctx = data;
945 return ctx->ops->signal1_type_get(ctx);
948 static u64 spufs_signal1_type_get(void *data)
950 struct spu_context *ctx = data;
954 ret = __spufs_signal1_type_get(data);
959 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
960 spufs_signal1_type_set, "%llu");
962 static void spufs_signal2_type_set(void *data, u64 val)
964 struct spu_context *ctx = data;
967 ctx->ops->signal2_type_set(ctx, val);
971 static u64 __spufs_signal2_type_get(void *data)
973 struct spu_context *ctx = data;
974 return ctx->ops->signal2_type_get(ctx);
977 static u64 spufs_signal2_type_get(void *data)
979 struct spu_context *ctx = data;
983 ret = __spufs_signal2_type_get(data);
988 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
989 spufs_signal2_type_set, "%llu");
/* mss file: mmap-only access to the MFC/MSSync problem-state page */
992 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
993 unsigned long address)
995 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
998 static struct vm_operations_struct spufs_mss_mmap_vmops = {
999 .nopfn = spufs_mss_mmap_nopfn,
1003 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1005 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1007 if (!(vma->vm_flags & VM_SHARED))
1010 vma->vm_flags |= VM_IO | VM_PFNMAP;
1011 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1012 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1014 vma->vm_ops = &spufs_mss_mmap_vmops;
1017 #else /* SPUFS_MMAP_4K */
1018 #define spufs_mss_mmap NULL
1019 #endif /* !SPUFS_MMAP_4K */
1021 static int spufs_mss_open(struct inode *inode, struct file *file)
1023 struct spufs_inode_info *i = SPUFS_I(inode);
1024 struct spu_context *ctx = i->i_ctx;
1026 file->private_data = i->i_ctx;
1027 ctx->mss = inode->i_mapping;
1029 return nonseekable_open(inode, file);
1032 static const struct file_operations spufs_mss_fops = {
1033 .open = spufs_mss_open,
1034 .mmap = spufs_mss_mmap,
/* psmap file: mmap of the full 128k problem-state area */
1037 static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1038 unsigned long address)
1040 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
1043 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1044 .nopfn = spufs_psmap_mmap_nopfn,
1048 * mmap support for full problem state area [0x00000 - 0x1ffff].
1050 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1052 if (!(vma->vm_flags & VM_SHARED))
1055 vma->vm_flags |= VM_IO | VM_PFNMAP;
1056 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1057 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1059 vma->vm_ops = &spufs_psmap_mmap_vmops;
1063 static int spufs_psmap_open(struct inode *inode, struct file *file)
1065 struct spufs_inode_info *i = SPUFS_I(inode);
1066 struct spu_context *ctx = i->i_ctx;
1068 file->private_data = i->i_ctx;
1069 ctx->psmap = inode->i_mapping;
1071 return nonseekable_open(inode, file);
1074 static const struct file_operations spufs_psmap_fops = {
1075 .open = spufs_psmap_open,
1076 .mmap = spufs_psmap_mmap,
/* mfc file: user-initiated DMA through the MFC command queue */
1081 static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1082 unsigned long address)
1084 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
1087 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1088 .nopfn = spufs_mfc_mmap_nopfn,
1092 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1094 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1096 if (!(vma->vm_flags & VM_SHARED))
1099 vma->vm_flags |= VM_IO | VM_PFNMAP;
1100 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1101 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1103 vma->vm_ops = &spufs_mfc_mmap_vmops;
1106 #else /* SPUFS_MMAP_4K */
1107 #define spufs_mfc_mmap NULL
1108 #endif /* !SPUFS_MMAP_4K */
1110 static int spufs_mfc_open(struct inode *inode, struct file *file)
1112 struct spufs_inode_info *i = SPUFS_I(inode);
1113 struct spu_context *ctx = i->i_ctx;
1115 /* we don't want to deal with DMA into other processes */
1116 if (ctx->owner != current->mm)
/* only allow a single opener of this file per context */
1119 if (atomic_read(&inode->i_count) != 1)
1122 file->private_data = ctx;
1123 ctx->mfc = inode->i_mapping;
1125 return nonseekable_open(inode, file);
1128 /* interrupt-level mfc callback function. */
1129 void spufs_mfc_callback(struct spu *spu)
1131 struct spu_context *ctx = spu->ctx;
1133 wake_up_all(&ctx->mfc_wq);
1135 pr_debug("%s %s\n", __FUNCTION__, spu->name);
1136 if (ctx->mfc_fasync) {
1137 u32 free_elements, tagstatus;
1140 /* no need for spu_acquire in interrupt context */
1141 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1142 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
/* build the SIGIO poll mask (assignments elided in this view) */
1145 if (free_elements & 0xffff)
1147 if (tagstatus & ctx->tagwait)
1150 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1154 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1156 /* See if there is one tag group is complete */
1157 /* FIXME we need locking around tagwait */
1158 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1159 ctx->tagwait &= ~*status;
1163 /* enable interrupt waiting for any tag group,
1164 may silently fail if interrupts are already enabled */
1165 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
/* read: return the 32-bit tag status of the completed groups */
1169 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1170 size_t size, loff_t *pos)
1172 struct spu_context *ctx = file->private_data;
1180 if (file->f_flags & O_NONBLOCK) {
1181 status = ctx->ops->read_mfc_tagstatus(ctx);
1182 if (!(status & ctx->tagwait))
1185 ctx->tagwait &= ~status;
1187 ret = spufs_wait(ctx->mfc_wq,
1188 spufs_read_mfc_tagstatus(ctx, &status));
1196 if (copy_to_user(buffer, &status, 4))
/* sanity-check a user supplied MFC DMA command before queueing it */
1203 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1205 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1206 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1217 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
/* local-store and effective address must be equally 16-byte aligned */
1221 if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
1222 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1227 switch (cmd->size & 0xf) {
1248 pr_debug("invalid DMA alignment %x for size %x\n",
1249 cmd->lsa & 0xf, cmd->size);
/* single MFC transfers are limited to 16k */
1253 if (cmd->size > 16 * 1024) {
1254 pr_debug("invalid DMA size %x\n", cmd->size);
1258 if (cmd->tag & 0xfff0) {
1259 /* we reserve the higher tag numbers for kernel use */
1260 pr_debug("invalid DMA tag\n");
1265 /* not supported in this version */
1266 pr_debug("invalid DMA class\n");
/* submit a command, retrying once after waiting for queue space */
1273 static int spu_send_mfc_command(struct spu_context *ctx,
1274 struct mfc_dma_command cmd,
1277 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1278 if (*error == -EAGAIN) {
1279 /* wait for any tag group to complete
1280 so we have space for the new command */
1281 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1282 /* try again, because the queue might be
1284 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1285 if (*error == -EAGAIN)
/* write: accept exactly one struct mfc_dma_command and queue it */
1291 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1292 size_t size, loff_t *pos)
1294 struct spu_context *ctx = file->private_data;
1295 struct mfc_dma_command cmd;
1298 if (size != sizeof cmd)
1302 if (copy_from_user(&cmd, buffer, sizeof cmd))
1305 ret = spufs_check_valid_dma(&cmd);
1309 spu_acquire_runnable(ctx, 0);
1310 if (file->f_flags & O_NONBLOCK) {
1311 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1314 ret = spufs_wait(ctx->mfc_wq,
1315 spu_send_mfc_command(ctx, cmd, &status));
/* remember which tag group the caller may later wait for */
1324 ctx->tagwait |= 1 << cmd.tag;
1331 static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
1333 struct spu_context *ctx = file->private_data;
1334 u32 free_elements, tagstatus;
/* query mode 2: interrupt on any of the watched tag groups */
1338 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1339 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1340 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1343 poll_wait(file, &ctx->mfc_wq, wait);
1346 if (free_elements & 0xffff)
1347 mask |= POLLOUT | POLLWRNORM;
1348 if (tagstatus & ctx->tagwait)
1349 mask |= POLLIN | POLLRDNORM;
1351 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1352 free_elements, tagstatus, ctx->tagwait);
/* flush: wait until all of the caller's outstanding tag groups finish */
1357 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1359 struct spu_context *ctx = file->private_data;
1364 /* this currently hangs */
1365 ret = spufs_wait(ctx->mfc_wq,
1366 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1369 ret = spufs_wait(ctx->mfc_wq,
1370 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
/* fsync simply delegates to flush; datasync argument is ignored */
1380 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1383 return spufs_mfc_flush(file, NULL);
1386 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1388 struct spu_context *ctx = file->private_data;
1390 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1393 static const struct file_operations spufs_mfc_fops = {
1394 .open = spufs_mfc_open,
1395 .read = spufs_mfc_read,
1396 .write = spufs_mfc_write,
1397 .poll = spufs_mfc_poll,
1398 .flush = spufs_mfc_flush,
1399 .fsync = spufs_mfc_fsync,
1400 .fasync = spufs_mfc_fasync,
1401 .mmap = spufs_mfc_mmap,
/* npc attribute: next program counter, read/written via ctx->ops */
1404 static void spufs_npc_set(void *data, u64 val)
1406 struct spu_context *ctx = data;
1408 ctx->ops->npc_write(ctx, val);
1412 static u64 spufs_npc_get(void *data)
1414 struct spu_context *ctx = data;
1417 ret = ctx->ops->npc_read(ctx);
1421 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
/* decr attribute: decrementer slot in the saved local-store image */
1424 static void spufs_decr_set(void *data, u64 val)
1426 struct spu_context *ctx = data;
1427 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1428 spu_acquire_saved(ctx);
1429 lscsa->decr.slot[0] = (u32) val;
1433 static u64 __spufs_decr_get(void *data)
1435 struct spu_context *ctx = data;
1436 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1437 return lscsa->decr.slot[0];
1440 static u64 spufs_decr_get(void *data)
1442 struct spu_context *ctx = data;
1444 spu_acquire_saved(ctx);
1445 ret = __spufs_decr_get(data);
1449 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
/* decr_status attribute: decrementer-running flag in the saved image */
1452 static void spufs_decr_status_set(void *data, u64 val)
1454 struct spu_context *ctx = data;
1455 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1456 spu_acquire_saved(ctx);
1457 lscsa->decr_status.slot[0] = (u32) val;
1461 static u64 __spufs_decr_status_get(void *data)
1463 struct spu_context *ctx = data;
1464 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1465 return lscsa->decr_status.slot[0];
1468 static u64 spufs_decr_status_get(void *data)
1470 struct spu_context *ctx = data;
1472 spu_acquire_saved(ctx);
1473 ret = __spufs_decr_status_get(data);
1477 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1478 spufs_decr_status_set, "0x%llx\n")
/* event_mask attribute: SPU event mask slot in the saved image */
1480 static void spufs_event_mask_set(void *data, u64 val)
1482 struct spu_context *ctx = data;
1483 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1484 spu_acquire_saved(ctx);
1485 lscsa->event_mask.slot[0] = (u32) val;
1489 static u64 __spufs_event_mask_get(void *data)
1491 struct spu_context *ctx = data;
1492 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1493 return lscsa->event_mask.slot[0];
1496 static u64 spufs_event_mask_get(void *data)
1498 struct spu_context *ctx = data;
1500 spu_acquire_saved(ctx);
1501 ret = __spufs_event_mask_get(data);
1505 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1506 spufs_event_mask_set, "0x%llx\n")
/* event_status: pending events from channel 0 of the saved state */
1508 static u64 __spufs_event_status_get(void *data)
1510 struct spu_context *ctx = data;
1511 struct spu_state *state = &ctx->csa;
1513 stat = state->spu_chnlcnt_RW[0];
1515 return state->spu_chnldata_RW[0];
1519 static u64 spufs_event_status_get(void *data)
1521 struct spu_context *ctx = data;
1524 spu_acquire_saved(ctx);
1525 ret = __spufs_event_status_get(data);
1529 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
/* srr0 attribute: save/restore register 0 in the saved image */
1532 static void spufs_srr0_set(void *data, u64 val)
1534 struct spu_context *ctx = data;
1535 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1536 spu_acquire_saved(ctx);
1537 lscsa->srr0.slot[0] = (u32) val;
1541 static u64 spufs_srr0_get(void *data)
1543 struct spu_context *ctx = data;
1544 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1546 spu_acquire_saved(ctx);
1547 ret = lscsa->srr0.slot[0];
1551 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
/* phys-id attribute: number of the SPE currently running the context,
 * or (unsigned int)-1 while the context is not scheduled */
1554 static u64 spufs_id_get(void *data)
1556 struct spu_context *ctx = data;
1560 if (ctx->state == SPU_STATE_RUNNABLE)
1561 num = ctx->spu->number;
1563 num = (unsigned int)-1;
1568 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
/* object-id attribute: opaque cookie set by userspace (e.g. debuggers) */
1570 static u64 __spufs_object_id_get(void *data)
1572 struct spu_context *ctx = data;
1573 return ctx->object_id;
1576 static u64 spufs_object_id_get(void *data)
1578 /* FIXME: Should there really be no locking here? */
1579 return __spufs_object_id_get(data);
1582 static void spufs_object_id_set(void *data, u64 id)
1584 struct spu_context *ctx = data;
1585 ctx->object_id = id;
1588 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1589 spufs_object_id_set, "0x%llx\n");
/* lslr attribute: local-store limit register from the privileged state */
1591 static u64 __spufs_lslr_get(void *data)
1593 struct spu_context *ctx = data;
1594 return ctx->csa.priv2.spu_lslr_RW;
1597 static u64 spufs_lslr_get(void *data)
1599 struct spu_context *ctx = data;
1602 spu_acquire_saved(ctx);
1603 ret = __spufs_lslr_get(data);
1608 DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
/* shared open routine for the *_info files: just stash the context */
1610 static int spufs_info_open(struct inode *inode, struct file *file)
1612 struct spufs_inode_info *i = SPUFS_I(inode);
1613 struct spu_context *ctx = i->i_ctx;
1614 file->private_data = ctx;
/* mbox_info: snapshot of the PU-side mailbox from the saved state;
 * the low status byte holds the entry count */
1618 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1619 char __user *buf, size_t len, loff_t *pos)
1624 mbox_stat = ctx->csa.prob.mb_stat_R;
1625 if (mbox_stat & 0x0000ff) {
1626 data = ctx->csa.prob.pu_mb_R;
1629 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1632 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1633 size_t len, loff_t *pos)
1636 struct spu_context *ctx = file->private_data;
1638 if (!access_ok(VERIFY_WRITE, buf, len))
/* save the context, then snapshot under the register lock */
1641 spu_acquire_saved(ctx);
1642 spin_lock(&ctx->csa.register_lock);
1643 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1644 spin_unlock(&ctx->csa.register_lock);
1650 static const struct file_operations spufs_mbox_info_fops = {
1651 .open = spufs_info_open,
1652 .read = spufs_mbox_info_read,
1653 .llseek = generic_file_llseek,
/* ibox_info: same idea for the interrupt mailbox (status bits 16-23) */
1656 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1657 char __user *buf, size_t len, loff_t *pos)
1662 ibox_stat = ctx->csa.prob.mb_stat_R;
1663 if (ibox_stat & 0xff0000) {
1664 data = ctx->csa.priv2.puint_mb_R;
1667 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1670 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1671 size_t len, loff_t *pos)
1673 struct spu_context *ctx = file->private_data;
1676 if (!access_ok(VERIFY_WRITE, buf, len))
1679 spu_acquire_saved(ctx);
1680 spin_lock(&ctx->csa.register_lock);
1681 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1682 spin_unlock(&ctx->csa.register_lock);
1688 static const struct file_operations spufs_ibox_info_fops = {
1689 .open = spufs_info_open,
1690 .read = spufs_ibox_info_read,
1691 .llseek = generic_file_llseek,
/* wbox_info: pending SPU->PU writes; count derived from status bits 8-15 */
1694 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1695 char __user *buf, size_t len, loff_t *pos)
1701 wbox_stat = ctx->csa.prob.mb_stat_R;
1702 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1703 for (i = 0; i < cnt; i++) {
1704 data[i] = ctx->csa.spu_mailbox_data[i];
1707 return simple_read_from_buffer(buf, len, pos, &data,
1711 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1712 size_t len, loff_t *pos)
1714 struct spu_context *ctx = file->private_data;
1717 if (!access_ok(VERIFY_WRITE, buf, len))
1720 spu_acquire_saved(ctx);
1721 spin_lock(&ctx->csa.register_lock);
1722 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1723 spin_unlock(&ctx->csa.register_lock);
1729 static const struct file_operations spufs_wbox_info_fops = {
1730 .open = spufs_info_open,
1731 .read = spufs_wbox_info_read,
1732 .llseek = generic_file_llseek,
1735 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1736 char __user *buf, size_t len, loff_t *pos)
1738 struct spu_dma_info info;
1739 struct mfc_cq_sr *qp, *spuqp;
1742 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1743 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1744 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1745 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1746 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1747 for (i = 0; i < 16; i++) {
1748 qp = &info.dma_info_command_data[i];
1749 spuqp = &ctx->csa.priv2.spuq[i];
1751 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1752 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1753 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1754 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1757 return simple_read_from_buffer(buf, len, pos, &info,
1761 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1762 size_t len, loff_t *pos)
1764 struct spu_context *ctx = file->private_data;
1767 if (!access_ok(VERIFY_WRITE, buf, len))
1770 spu_acquire_saved(ctx);
1771 spin_lock(&ctx->csa.register_lock);
1772 ret = __spufs_dma_info_read(ctx, buf, len, pos);
1773 spin_unlock(&ctx->csa.register_lock);
1779 static const struct file_operations spufs_dma_info_fops = {
1780 .open = spufs_info_open,
1781 .read = spufs_dma_info_read,
1784 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1785 char __user *buf, size_t len, loff_t *pos)
1787 struct spu_proxydma_info info;
1788 struct mfc_cq_sr *qp, *puqp;
1789 int ret = sizeof info;
1795 if (!access_ok(VERIFY_WRITE, buf, len))
1798 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1799 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1800 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1801 for (i = 0; i < 8; i++) {
1802 qp = &info.proxydma_info_command_data[i];
1803 puqp = &ctx->csa.priv2.puq[i];
1805 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1806 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1807 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1808 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1811 return simple_read_from_buffer(buf, len, pos, &info,
1815 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1816 size_t len, loff_t *pos)
1818 struct spu_context *ctx = file->private_data;
1821 spu_acquire_saved(ctx);
1822 spin_lock(&ctx->csa.register_lock);
1823 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
1824 spin_unlock(&ctx->csa.register_lock);
1830 static const struct file_operations spufs_proxydma_info_fops = {
1831 .open = spufs_info_open,
1832 .read = spufs_proxydma_info_read,
1835 struct tree_descr spufs_dir_contents[] = {
1836 { "mem", &spufs_mem_fops, 0666, },
1837 { "regs", &spufs_regs_fops, 0666, },
1838 { "mbox", &spufs_mbox_fops, 0444, },
1839 { "ibox", &spufs_ibox_fops, 0444, },
1840 { "wbox", &spufs_wbox_fops, 0222, },
1841 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1842 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1843 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1844 { "signal1", &spufs_signal1_fops, 0666, },
1845 { "signal2", &spufs_signal2_fops, 0666, },
1846 { "signal1_type", &spufs_signal1_type, 0666, },
1847 { "signal2_type", &spufs_signal2_type, 0666, },
1848 { "cntl", &spufs_cntl_fops, 0666, },
1849 { "fpcr", &spufs_fpcr_fops, 0666, },
1850 { "lslr", &spufs_lslr_ops, 0444, },
1851 { "mfc", &spufs_mfc_fops, 0666, },
1852 { "mss", &spufs_mss_fops, 0666, },
1853 { "npc", &spufs_npc_ops, 0666, },
1854 { "srr0", &spufs_srr0_ops, 0666, },
1855 { "decr", &spufs_decr_ops, 0666, },
1856 { "decr_status", &spufs_decr_status_ops, 0666, },
1857 { "event_mask", &spufs_event_mask_ops, 0666, },
1858 { "event_status", &spufs_event_status_ops, 0444, },
1859 { "psmap", &spufs_psmap_fops, 0666, },
1860 { "phys-id", &spufs_id_ops, 0666, },
1861 { "object-id", &spufs_object_id_ops, 0666, },
1862 { "mbox_info", &spufs_mbox_info_fops, 0444, },
1863 { "ibox_info", &spufs_ibox_info_fops, 0444, },
1864 { "wbox_info", &spufs_wbox_info_fops, 0444, },
1865 { "dma_info", &spufs_dma_info_fops, 0444, },
1866 { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
1870 struct tree_descr spufs_dir_nosched_contents[] = {
1871 { "mem", &spufs_mem_fops, 0666, },
1872 { "mbox", &spufs_mbox_fops, 0444, },
1873 { "ibox", &spufs_ibox_fops, 0444, },
1874 { "wbox", &spufs_wbox_fops, 0222, },
1875 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1876 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1877 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1878 { "signal1", &spufs_signal1_fops, 0666, },
1879 { "signal2", &spufs_signal2_fops, 0666, },
1880 { "signal1_type", &spufs_signal1_type, 0666, },
1881 { "signal2_type", &spufs_signal2_type, 0666, },
1882 { "mss", &spufs_mss_fops, 0666, },
1883 { "mfc", &spufs_mfc_fops, 0666, },
1884 { "cntl", &spufs_cntl_fops, 0666, },
1885 { "npc", &spufs_npc_ops, 0666, },
1886 { "psmap", &spufs_psmap_fops, 0666, },
1887 { "phys-id", &spufs_id_ops, 0666, },
1888 { "object-id", &spufs_object_id_ops, 0666, },
1892 struct spufs_coredump_reader spufs_coredump_read[] = {
1893 { "regs", __spufs_regs_read, NULL, 128 * 16 },
1894 { "fpcr", __spufs_fpcr_read, NULL, 16 },
1895 { "lslr", NULL, __spufs_lslr_get, 11 },
1896 { "decr", NULL, __spufs_decr_get, 11 },
1897 { "decr_status", NULL, __spufs_decr_status_get, 11 },
1898 { "mem", __spufs_mem_read, NULL, 256 * 1024, },
1899 { "signal1", __spufs_signal1_read, NULL, 4 },
1900 { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
1901 { "signal2", __spufs_signal2_read, NULL, 4 },
1902 { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
1903 { "event_mask", NULL, __spufs_event_mask_get, 8 },
1904 { "event_status", NULL, __spufs_event_status_get, 8 },
1905 { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
1906 { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
1907 { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
1908 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
1909 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
1910 { "object-id", NULL, __spufs_object_id_get, 19 },
1913 int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;