/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};

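/*
 * Usage sketch (illustrative only, not part of the original file): a
 * u64 attribute backed by hypothetical foo_get()/foo_set() helpers
 * would be declared as
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, foo_get, foo_set,
 *				      "0x%llx\n");
 *
 * which expands to spufs_foo_ops_open() plus a file_operations struct
 * wired to spufs_attr_read() and spufs_attr_write().
 */
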
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	spu_acquire(ctx);
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}

static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};

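/*
 * Illustrative userspace sketch (assumptions: spufs mounted at /spu,
 * a context directory named "myctx"): the local store is mapped
 * through the "mem" file, and MAP_SHARED is mandatory, matching the
 * VM_SHARED check in spufs_mem_mmap() above:
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */
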
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * changed.
	 */
	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		down_read(&current->mm->mmap_sem);
		goto out;
	}

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

out:
	spu_release(ctx);

	return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return spufs_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spufs_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = spufs_attr_read,
	.write = spufs_attr_write,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
			size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

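/*
 * Illustrative userspace sketch (not part of the original file):
 * mailbox reads must be a multiple of 4 bytes and never block; an
 * empty mailbox yields -EAGAIN:
 *
 *	u32 data;
 *	if (read(mbox_fd, &data, 4) == 4)
 *		consume_word(data);	// hypothetical helper
 */
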
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

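/*
 * Illustrative userspace sketch (not part of the original file):
 * instead of blocking in read(), the interrupt mailbox can be waited
 * on with poll(), which maps to spufs_ibox_poll() above:
 *
 *	struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
 *	u32 data;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ibox_fd, &data, 4);
 */
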
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

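/*
 * Illustrative userspace sketch (not part of the original file):
 * writes must be multiples of 4 bytes; when the file is opened with
 * O_NONBLOCK a full mailbox yields -EAGAIN instead of sleeping:
 *
 *	u32 cmd = 0x1;			// hypothetical payload
 *	if (write(wbox_fd, &cmd, 4) < 0 && errno == EAGAIN)
 *		wait_and_retry();	// hypothetical helper
 */
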
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

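/*
 * Illustrative userspace sketch (not part of the original file):
 * posting a signal-notification word to the SPE:
 *
 *	u32 val = 0x1;
 *	write(sig1_fd, &val, 4);
 *
 * With signal1_type set to 1 (logical-OR mode), successive writes
 * accumulate bits instead of overwriting the register.
 */
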
static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/*
 * This is a wrapper around DEFINE_SPUFS_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		spu_acquire(ctx);					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		spu_acquire_saved(ctx);					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

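/*
 * Usage sketch (illustrative): given the plain getter spufs_npc_get()
 * defined later in this file,
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get,
 *			       spufs_npc_set, "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * generates __spufs_npc_get(), which wraps the read in
 * spu_acquire()/spu_release(), plus the spufs_npc_ops file_operations.
 */
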
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};

#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

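/*
 * Illustrative example (not part of the original file): a command that
 * passes all of the checks above:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,		// 16-byte aligned LS address
 *		.ea   = buf_ea,		// hypothetical effective address,
 *					// low 4 bits must match .lsa
 *		.size = 0x4000,		// 16 KB, the maximum allowed
 *		.tag  = 1,		// tags 16 and up are kernel-reserved
 *		.cmd  = MFC_GET_CMD,
 *	};
 */
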
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   emptied already */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	spu_acquire(ctx);
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out_unlock;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (!ret)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

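/*
 * Illustrative userspace sketch (not part of the original file): queue
 * a DMA by writing one mfc_dma_command, then read back a 32-bit mask of
 * completed tag groups; without O_NONBLOCK the read sleeps until one of
 * the awaited tags finishes:
 *
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	u32 tagstatus;
 *	read(mfc_fd, &tagstatus, 4);
 */
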
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire_saved(ctx);
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;

	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 mbox_stat;
	u32 data;

	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 ibox_stat;
	u32 data;

	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

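/*
 * Worked example: with times[state] == 40000000 ns accumulated and the
 * context still loaded in that state for another 2500000 ns since
 * tstamp, the reported value is 42500000 / NSEC_PER_MSEC = 42 ms
 * (integer division truncates).
 */
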
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	spu_acquire(ctx);
	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

struct tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};