include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
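For context: once percpu.h no longer pulls in slab.h implicitly, any file that calls the slab allocators directly has to include <linux/slab.h> itself; this file does so via kmalloc()/kfree() in the switch_log code further down. A minimal sketch of the pattern, with hypothetical example_* names that are not part of this patch:

    #include <linux/slab.h>		/* kzalloc(), kfree() - no longer implicit */

    static void *example_buf;		/* hypothetical buffer */

    static int example_alloc(void)
    {
    	/* works only because slab.h is included explicitly above */
    	example_buf = kzalloc(128, GFP_KERNEL);
    	return example_buf ? 0 : -ENOMEM;
    }

    static void example_free(void)
    {
    	kfree(example_buf);
    	example_buf = NULL;
    }
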
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 9533a8a..5c28082 100644
@@ -29,7 +29,7 @@
 #include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/seq_file.h>
-#include <linux/marker.h>
+#include <linux/slab.h>
 
 #include <asm/io.h>
 #include <asm/time.h>
@@ -38,6 +38,7 @@
 #include <asm/uaccess.h>
 
 #include "spufs.h"
+#include "sputrace.h"
 
 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
 
@@ -147,7 +148,7 @@ static int __fops ## _open(struct inode *inode, struct file *file)  \
        __simple_attr_check_format(__fmt, 0ull);                        \
        return spufs_attr_open(inode, file, __get, __set, __fmt);       \
 }                                                                      \
-static struct file_operations __fops = {                               \
+static const struct file_operations __fops = {                         \
        .owner   = THIS_MODULE,                                         \
        .open    = __fops ## _open,                                     \
        .release = spufs_attr_release,                                  \
@@ -273,12 +274,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_NOPAGE;
 
        if (ctx->state == SPU_STATE_SAVED) {
-               vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                                       & ~_PAGE_NO_CACHE);
+               vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
-               vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                            | _PAGE_NO_CACHE);
+               vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
        vm_insert_pfn(vma, address, pfn);
@@ -288,9 +287,32 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return VM_FAULT_NOPAGE;
 }
 
+static int spufs_mem_mmap_access(struct vm_area_struct *vma,
+                               unsigned long address,
+                               void *buf, int len, int write)
+{
+       struct spu_context *ctx = vma->vm_file->private_data;
+       unsigned long offset = address - vma->vm_start;
+       char *local_store;
 
-static struct vm_operations_struct spufs_mem_mmap_vmops = {
+       if (write && !(vma->vm_flags & VM_WRITE))
+               return -EACCES;
+       if (spu_acquire(ctx))
+               return -EINTR;
+       if ((offset + len) > vma->vm_end)
+               len = vma->vm_end - offset;
+       local_store = ctx->ops->get_ls(ctx);
+       if (write)
+               memcpy_toio(local_store + offset, buf, len);
+       else
+               memcpy_fromio(buf, local_store + offset, len);
+       spu_release(ctx);
+       return len;
+}
+
+static const struct vm_operations_struct spufs_mem_mmap_vmops = {
        .fault = spufs_mem_mmap_fault,
+       .access = spufs_mem_mmap_access,
 };
 
 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
@@ -315,8 +337,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE);
+       vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
@@ -367,6 +388,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
        if (offset >= ps_size)
                return VM_FAULT_SIGBUS;
 
+       if (fatal_signal_pending(current))
+               return VM_FAULT_SIGBUS;
+
        /*
         * Because we release the mmap_sem, the context may be destroyed while
         * we're in spu_wait. Grab an extra reference so it isn't destroyed
@@ -413,7 +437,7 @@ static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
        return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_cntl_mmap_vmops = {
+static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
        .fault = spufs_cntl_mmap_fault,
 };
 
@@ -426,8 +450,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
@@ -525,6 +548,11 @@ spufs_regs_read(struct file *file, char __user *buffer,
        int ret;
        struct spu_context *ctx = file->private_data;
 
+       /* pre-check for file position: if we'd return EOF, there's no point
+        * causing a deschedule */
+       if (*pos >= sizeof(ctx->csa.lscsa->gprs))
+               return 0;
+
        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
@@ -541,16 +569,17 @@ spufs_regs_write(struct file *file, const char __user *buffer,
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;
 
-       size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
-       if (size <= 0)
+       if (*pos >= sizeof(lscsa->gprs))
                return -EFBIG;
+
+       size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
        *pos += size;
 
        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
 
-       ret = copy_from_user(lscsa->gprs + *pos - size,
+       ret = copy_from_user((char *)lscsa->gprs + *pos - size,
                             buffer, size) ? -EFAULT : size;
 
        spu_release_saved(ctx);
@@ -596,10 +625,11 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;
 
-       size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
-       if (size <= 0)
+       if (*pos >= sizeof(lscsa->fpcr))
                return -EFBIG;
 
+       size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
+
        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
@@ -1114,7 +1144,7 @@ spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 #endif
 }
 
-static struct vm_operations_struct spufs_signal1_mmap_vmops = {
+static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .fault = spufs_signal1_mmap_fault,
 };
 
@@ -1124,8 +1154,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
@@ -1251,7 +1280,7 @@ spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 #endif
 }
 
-static struct vm_operations_struct spufs_signal2_mmap_vmops = {
+static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .fault = spufs_signal2_mmap_fault,
 };
 
@@ -1261,8 +1290,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
@@ -1370,7 +1398,7 @@ spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_mss_mmap_vmops = {
+static const struct vm_operations_struct spufs_mss_mmap_vmops = {
        .fault = spufs_mss_mmap_fault,
 };
 
@@ -1383,8 +1411,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
@@ -1432,7 +1459,7 @@ spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_psmap_mmap_vmops = {
+static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
        .fault = spufs_psmap_mmap_fault,
 };
 
@@ -1445,8 +1472,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_psmap_mmap_vmops;
        return 0;
@@ -1492,7 +1518,7 @@ spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_mfc_mmap_vmops = {
+static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .fault = spufs_mfc_mmap_fault,
 };
 
@@ -1505,8 +1531,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
@@ -1632,7 +1657,7 @@ out:
 
 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
 {
-       pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
+       pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
 
        switch (cmd->cmd) {
@@ -1649,7 +1674,7 @@ static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
        }
 
        if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
-               pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
+               pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
                                cmd->ea, cmd->lsa);
                return -EIO;
        }
@@ -2403,38 +2428,49 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx)
 static int spufs_switch_log_open(struct inode *inode, struct file *file)
 {
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       int rc;
+
+       rc = spu_acquire(ctx);
+       if (rc)
+               return rc;
 
-       /*
-        * We (ab-)use the mapping_lock here because it serves the similar
-        * purpose for synchronizing open/close elsewhere.  Maybe it should
-        * be renamed eventually.
-        */
-       mutex_lock(&ctx->mapping_lock);
        if (ctx->switch_log) {
-               spin_lock(&ctx->switch_log->lock);
-               ctx->switch_log->head = 0;
-               ctx->switch_log->tail = 0;
-               spin_unlock(&ctx->switch_log->lock);
-       } else {
-               /*
-                * We allocate the switch log data structures on first open.
-                * They will never be free because we assume a context will
-                * be traced until it goes away.
-                */
-               ctx->switch_log = kzalloc(sizeof(struct switch_log) +
-                       SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
-                       GFP_KERNEL);
-               if (!ctx->switch_log)
-                       goto out;
-               spin_lock_init(&ctx->switch_log->lock);
-               init_waitqueue_head(&ctx->switch_log->wait);
+               rc = -EBUSY;
+               goto out;
        }
-       mutex_unlock(&ctx->mapping_lock);
+
+       ctx->switch_log = kmalloc(sizeof(struct switch_log) +
+               SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+               GFP_KERNEL);
+
+       if (!ctx->switch_log) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       ctx->switch_log->head = ctx->switch_log->tail = 0;
+       init_waitqueue_head(&ctx->switch_log->wait);
+       rc = 0;
+
+out:
+       spu_release(ctx);
+       return rc;
+}
+
+static int spufs_switch_log_release(struct inode *inode, struct file *file)
+{
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       int rc;
+
+       rc = spu_acquire(ctx);
+       if (rc)
+               return rc;
+
+       kfree(ctx->switch_log);
+       ctx->switch_log = NULL;
+       spu_release(ctx);
 
        return 0;
- out:
-       mutex_unlock(&ctx->mapping_lock);
-       return -ENOMEM;
 }
 
 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
@@ -2459,45 +2495,57 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int error = 0, cnt = 0;
 
-       if (!buf || len < 0)
+       if (!buf)
                return -EINVAL;
 
+       error = spu_acquire(ctx);
+       if (error)
+               return error;
+
        while (cnt < len) {
                char tbuf[128];
                int width;
 
-               if (file->f_flags & O_NONBLOCK) {
-                       if (spufs_switch_log_used(ctx) <= 0)
-                               return cnt ? cnt : -EAGAIN;
-               } else {
-                       /* Wait for data in buffer */
-                       error = wait_event_interruptible(ctx->switch_log->wait,
-                                       spufs_switch_log_used(ctx) > 0);
-                       if (error)
+               if (spufs_switch_log_used(ctx) == 0) {
+                       if (cnt > 0) {
+                               /* If there's data ready to go, we can
+                                * just return straight away */
+                               break;
+
+                       } else if (file->f_flags & O_NONBLOCK) {
+                               error = -EAGAIN;
                                break;
-               }
 
-               spin_lock(&ctx->switch_log->lock);
-               if (ctx->switch_log->head == ctx->switch_log->tail) {
-                       /* multiple readers race? */
-                       spin_unlock(&ctx->switch_log->lock);
-                       continue;
+                       } else {
+                               /* spufs_wait will drop the mutex and
+                                * re-acquire, but since we're in read(), the
+                                * file cannot be _released (and so
+                                * ctx->switch_log is stable).
+                                */
+                               error = spufs_wait(ctx->switch_log->wait,
+                                               spufs_switch_log_used(ctx) > 0);
+
+                               /* On error, spufs_wait returns without the
+                                * state mutex held */
+                               if (error)
+                                       return error;
+
+                               /* We may have had entries read from underneath
+                                * us while we dropped the mutex in spufs_wait,
+                                * so re-check */
+                               if (spufs_switch_log_used(ctx) == 0)
+                                       continue;
+                       }
                }
 
                width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
-               if (width < len) {
+               if (width < len)
                        ctx->switch_log->tail =
                                (ctx->switch_log->tail + 1) %
                                 SWITCH_LOG_BUFSIZE;
-               }
-
-               spin_unlock(&ctx->switch_log->lock);
-
-               /*
-                * If the record is greater than space available return
-                * partial buffer (so far)
-                */
-               if (width >= len)
+               else
+                       /* If the record is greater than space available return
+                        * partial buffer (so far) */
                        break;
 
                error = copy_to_user(buf + cnt, tbuf, width);
@@ -2506,6 +2554,8 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
                cnt += width;
        }
 
+       spu_release(ctx);
+
        return cnt == 0 ? error : cnt;
 }
 
@@ -2514,29 +2564,41 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
        struct inode *inode = file->f_path.dentry->d_inode;
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        unsigned int mask = 0;
+       int rc;
 
        poll_wait(file, &ctx->switch_log->wait, wait);
 
+       rc = spu_acquire(ctx);
+       if (rc)
+               return rc;
+
        if (spufs_switch_log_used(ctx) > 0)
                mask |= POLLIN;
 
+       spu_release(ctx);
+
        return mask;
 }
 
 static const struct file_operations spufs_switch_log_fops = {
-       .owner  = THIS_MODULE,
-       .open   = spufs_switch_log_open,
-       .read   = spufs_switch_log_read,
-       .poll   = spufs_switch_log_poll,
+       .owner          = THIS_MODULE,
+       .open           = spufs_switch_log_open,
+       .read           = spufs_switch_log_read,
+       .poll           = spufs_switch_log_poll,
+       .release        = spufs_switch_log_release,
 };
 
+/**
+ * Log a context switch event to a switch log reader.
+ *
+ * Must be called with ctx->state_mutex held.
+ */
 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
                u32 type, u32 val)
 {
        if (!ctx->switch_log)
                return;
 
-       spin_lock(&ctx->switch_log->lock);
        if (spufs_switch_log_avail(ctx) > 1) {
                struct switch_log_entry *p;
 
@@ -2550,7 +2612,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
                ctx->switch_log->head =
                        (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
        }
-       spin_unlock(&ctx->switch_log->lock);
 
        wake_up(&ctx->switch_log->wait);
 }
@@ -2575,7 +2636,7 @@ static int spufs_show_ctx(struct seq_file *s, void *private)
        }
 
        seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
-               " %c %lx %lx %lx %lx %x %x\n",
+               " %c %llx %llx %llx %llx %x %x\n",
                ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
                ctx->flags,
                ctx->sched_flags,
@@ -2607,22 +2668,22 @@ static const struct file_operations spufs_ctx_fops = {
        .release        = single_release,
 };
 
-struct spufs_tree_descr spufs_dir_contents[] = {
+const struct spufs_tree_descr spufs_dir_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
-       { "mem",  &spufs_mem_fops,  0666, },
-       { "regs", &spufs_regs_fops,  0666, },
+       { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
+       { "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
-       { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
-       { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
-       { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+       { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
+       { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
+       { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
-       { "fpcr", &spufs_fpcr_fops, 0666, },
+       { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
@@ -2632,30 +2693,31 @@ struct spufs_tree_descr spufs_dir_contents[] = {
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
-       { "psmap", &spufs_psmap_fops, 0666, },
+       { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
-       { "mbox_info", &spufs_mbox_info_fops, 0444, },
-       { "ibox_info", &spufs_ibox_info_fops, 0444, },
-       { "wbox_info", &spufs_wbox_info_fops, 0444, },
-       { "dma_info", &spufs_dma_info_fops, 0444, },
-       { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+       { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
+       { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
+       { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
+       { "dma_info", &spufs_dma_info_fops, 0444,
+               sizeof(struct spu_dma_info), },
+       { "proxydma_info", &spufs_proxydma_info_fops, 0444,
+               sizeof(struct spu_proxydma_info)},
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
        { "switch_log", &spufs_switch_log_fops, 0444 },
-       { ".ctx", &spufs_ctx_fops, 0444, },
        {},
 };
 
-struct spufs_tree_descr spufs_dir_nosched_contents[] = {
+const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
-       { "mem",  &spufs_mem_fops,  0666, },
+       { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
-       { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
-       { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
-       { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+       { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
+       { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
+       { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_nosched_fops, 0222, },
        { "signal2", &spufs_signal2_nosched_fops, 0222, },
        { "signal1_type", &spufs_signal1_type, 0666, },
@@ -2664,16 +2726,20 @@ struct spufs_tree_descr spufs_dir_nosched_contents[] = {
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
-       { "psmap", &spufs_psmap_fops, 0666, },
+       { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
+       {},
+};
+
+const struct spufs_tree_descr spufs_dir_debug_contents[] = {
        { ".ctx", &spufs_ctx_fops, 0444, },
        {},
 };
 
-struct spufs_coredump_reader spufs_coredump_read[] = {
+const struct spufs_coredump_reader spufs_coredump_read[] = {
        { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
        { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
        { "lslr", NULL, spufs_lslr_get, 19 },