include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...

diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index ba6101a..5c28082 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
 #include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 
 #include <asm/io.h>
-#include <asm/semaphore.h>
+#include <asm/time.h>
 #include <asm/spu.h>
 #include <asm/spu_info.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
+#include "sputrace.h"
 
 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
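
The new #include <linux/slab.h> above is the change named in the commit title: once slab.h stops being pulled in implicitly through other headers, every file that calls the slab allocator (this one uses kmalloc()/kfree() for the switch log below) must include it directly. A minimal sketch of the dependency; my_node and the helpers are placeholders, not part of this file:

#include <linux/slab.h>		/* kmalloc(), kfree(); also pulls in the GFP flags */

struct my_node {
	int payload;
};

static struct my_node *alloc_node(void)
{
	return kmalloc(sizeof(struct my_node), GFP_KERNEL);
}

static void free_node(struct my_node *n)
{
	kfree(n);
}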
 
@@ -146,7 +148,7 @@ static int __fops ## _open(struct inode *inode, struct file *file)  \
        __simple_attr_check_format(__fmt, 0ull);                        \
        return spufs_attr_open(inode, file, __get, __set, __fmt);       \
 }                                                                      \
-static struct file_operations __fops = {                               \
+static const struct file_operations __fops = {                         \
        .owner   = THIS_MODULE,                                         \
        .open    = __fops ## _open,                                     \
        .release = spufs_attr_release,                                  \
@@ -237,11 +239,13 @@ spufs_mem_write(struct file *file, const char __user *buffer,
        return size;
 }
 
-static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
-                                         unsigned long address)
+static int
+spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct spu_context *ctx = vma->vm_file->private_data;
-       unsigned long pfn, offset, addr0 = address;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       unsigned long pfn, offset;
+
 #ifdef CONFIG_SPU_FS_64K_LS
        struct spu_state *csa = &ctx->csa;
        int psize;
@@ -259,35 +263,56 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
        }
 #endif /* CONFIG_SPU_FS_64K_LS */
 
-       offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
+       offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= LS_SIZE)
-               return NOPFN_SIGBUS;
+               return VM_FAULT_SIGBUS;
 
-       pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
-                addr0, address, offset);
+       pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
+                       address, offset);
 
        if (spu_acquire(ctx))
-               return NOPFN_REFAULT;
+               return VM_FAULT_NOPAGE;
 
        if (ctx->state == SPU_STATE_SAVED) {
-               vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                                       & ~_PAGE_NO_CACHE);
+               vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
-               vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                            | _PAGE_NO_CACHE);
+               vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
        vm_insert_pfn(vma, address, pfn);
 
        spu_release(ctx);
 
-       return NOPFN_REFAULT;
+       return VM_FAULT_NOPAGE;
 }
 
+static int spufs_mem_mmap_access(struct vm_area_struct *vma,
+                               unsigned long address,
+                               void *buf, int len, int write)
+{
+       struct spu_context *ctx = vma->vm_file->private_data;
+       unsigned long offset = address - vma->vm_start;
+       char *local_store;
 
-static struct vm_operations_struct spufs_mem_mmap_vmops = {
-       .nopfn = spufs_mem_mmap_nopfn,
+       if (write && !(vma->vm_flags & VM_WRITE))
+               return -EACCES;
+       if (spu_acquire(ctx))
+               return -EINTR;
+       if ((offset + len) > vma->vm_end)
+               len = vma->vm_end - offset;
+       local_store = ctx->ops->get_ls(ctx);
+       if (write)
+               memcpy_toio(local_store + offset, buf, len);
+       else
+               memcpy_fromio(buf, local_store + offset, len);
+       spu_release(ctx);
+       return len;
+}
+
+static const struct vm_operations_struct spufs_mem_mmap_vmops = {
+       .fault = spufs_mem_mmap_fault,
+       .access = spufs_mem_mmap_access,
 };
 
 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
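
The hunk above converts the local-store mapping from the old ->nopfn callback to ->fault: the handler now receives a struct vm_fault, computes the offset from vmf->pgoff, inserts the PFN itself with vm_insert_pfn(), and returns VM_FAULT_NOPAGE or VM_FAULT_SIGBUS instead of a pfn/NOPFN_* value. A bare-bones sketch of that 2.6.26-era shape for a hypothetical VM_PFNMAP device; my_dev_phys and my_dev_size are stand-ins, not from this patch:

#include <linux/mm.h>

static unsigned long my_dev_phys;	/* physical base of the region (assumed) */
static unsigned long my_dev_size;	/* size of the mappable region in bytes */

static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;

	if (offset >= my_dev_size)
		return VM_FAULT_SIGBUS;

	/* For VM_PFNMAP mappings the handler installs the PFN itself and
	 * reports NOPAGE so the core does not expect a struct page back. */
	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
		      (my_dev_phys + offset) >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct my_dev_vmops = {
	.fault = my_dev_fault,
};
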
@@ -312,8 +337,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE);
+       vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
@@ -350,51 +374,71 @@ static const struct file_operations spufs_mem_fops = {
 #endif
 };
 
-static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
-                                   unsigned long address,
+static int spufs_ps_fault(struct vm_area_struct *vma,
+                                   struct vm_fault *vmf,
                                    unsigned long ps_offs,
                                    unsigned long ps_size)
 {
        struct spu_context *ctx = vma->vm_file->private_data;
-       unsigned long area, offset = address - vma->vm_start;
+       unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
+       int ret = 0;
+
+       spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
 
-       offset += vma->vm_pgoff << PAGE_SHIFT;
        if (offset >= ps_size)
-               return NOPFN_SIGBUS;
+               return VM_FAULT_SIGBUS;
+
+       if (fatal_signal_pending(current))
+               return VM_FAULT_SIGBUS;
+
+       /*
+        * Because we release the mmap_sem, the context may be destroyed while
+        * we're in spu_wait. Grab an extra reference so it isn't destroyed
+        * in the meantime.
+        */
+       get_spu_context(ctx);
 
        /*
         * We have to wait for context to be loaded before we have
         * pages to hand out to the user, but we don't want to wait
         * with the mmap_sem held.
         * It is possible to drop the mmap_sem here, but then we need
-        * to return NOPFN_REFAULT because the mappings may have
+        * to return VM_FAULT_NOPAGE because the mappings may have
         * changed.
         */
        if (spu_acquire(ctx))
-               return NOPFN_REFAULT;
+               goto refault;
 
        if (ctx->state == SPU_STATE_SAVED) {
                up_read(&current->mm->mmap_sem);
-               spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+               spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
+               ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+               spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
                down_read(&current->mm->mmap_sem);
        } else {
                area = ctx->spu->problem_phys + ps_offs;
-               vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+               vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+                                       (area + offset) >> PAGE_SHIFT);
+               spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
        }
 
-       spu_release(ctx);
-       return NOPFN_REFAULT;
+       if (!ret)
+               spu_release(ctx);
+
+refault:
+       put_spu_context(ctx);
+       return VM_FAULT_NOPAGE;
 }
 
 #if SPUFS_MMAP_4K
-static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
-                                          unsigned long address)
+static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
+                                          struct vm_fault *vmf)
 {
-       return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
+       return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_cntl_mmap_vmops = {
-       .nopfn = spufs_cntl_mmap_nopfn,
+static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
+       .fault = spufs_cntl_mmap_fault,
 };
 
 /*
@@ -406,8 +450,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
@@ -454,7 +497,7 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
        if (!i->i_openers++)
                ctx->cntl = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
-       return spufs_attr_open(inode, file, spufs_cntl_get,
+       return simple_attr_open(inode, file, spufs_cntl_get,
                                        spufs_cntl_set, "0x%08lx");
 }
 
@@ -464,7 +507,7 @@ spufs_cntl_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spufs_attr_release(inode, file);
+       simple_attr_release(inode, file);
 
        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
@@ -476,8 +519,8 @@ spufs_cntl_release(struct inode *inode, struct file *file)
 static const struct file_operations spufs_cntl_fops = {
        .open = spufs_cntl_open,
        .release = spufs_cntl_release,
-       .read = spufs_attr_read,
-       .write = spufs_attr_write,
+       .read = simple_attr_read,
+       .write = simple_attr_write,
        .mmap = spufs_cntl_mmap,
 };
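
The cntl file above now reuses the generic simple_attr helpers (simple_attr_open/read/write/release) for its get/set plumbing. For the common case those helpers come with the DEFINE_SIMPLE_ATTRIBUTE convenience macro from <linux/fs.h>; a self-contained sketch with placeholder names (my_val, my_val_fops):

#include <linux/fs.h>

static u64 my_val;

static int my_val_get(void *data, u64 *val)
{
	*val = my_val;
	return 0;
}

static int my_val_set(void *data, u64 val)
{
	my_val = val;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(my_val_fops, my_val_get, my_val_set, "0x%08llx\n");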
 
@@ -505,6 +548,11 @@ spufs_regs_read(struct file *file, char __user *buffer,
        int ret;
        struct spu_context *ctx = file->private_data;
 
+       /* pre-check for file position: if we'd return EOF, there's no point
+        * causing a deschedule */
+       if (*pos >= sizeof(ctx->csa.lscsa->gprs))
+               return 0;
+
        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
@@ -521,16 +569,17 @@ spufs_regs_write(struct file *file, const char __user *buffer,
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;
 
-       size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
-       if (size <= 0)
+       if (*pos >= sizeof(lscsa->gprs))
                return -EFBIG;
+
+       size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
        *pos += size;
 
        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
 
-       ret = copy_from_user(lscsa->gprs + *pos - size,
+       ret = copy_from_user((char *)lscsa->gprs + *pos - size,
                             buffer, size) ? -EFAULT : size;
 
        spu_release_saved(ctx);
@@ -576,10 +625,11 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;
 
-       size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
-       if (size <= 0)
+       if (*pos >= sizeof(lscsa->fpcr))
                return -EFBIG;
 
+       size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
+
        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
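
Both write paths above (regs and fpcr) now reject an out-of-range *pos with -EFBIG up front and only then clamp the transfer size. The same clamp-then-copy shape for a hypothetical fixed-size buffer; scratch and scratch_write are not part of the patch:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>	/* min_t() */
#include <linux/uaccess.h>	/* copy_from_user() */

static char scratch[256];	/* stand-in for the register save area */

static ssize_t scratch_write(struct file *file, const char __user *buf,
			     size_t size, loff_t *pos)
{
	if (*pos >= sizeof(scratch))
		return -EFBIG;

	size = min_t(size_t, sizeof(scratch) - *pos, size);
	if (copy_from_user(scratch + *pos, buf, size))
		return -EFAULT;

	*pos += size;
	return size;
}
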
@@ -749,23 +799,25 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 
        count = spu_acquire(ctx);
        if (count)
-               return count;
+               goto out;
 
        /* wait only for the first element */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
-               if (!spu_ibox_read(ctx, &ibox_data))
+               if (!spu_ibox_read(ctx, &ibox_data)) {
                        count = -EAGAIN;
+                       goto out_unlock;
+               }
        } else {
                count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+               if (count)
+                       goto out;
        }
-       if (count)
-               goto out;
 
        /* if we can't write at all, return -EFAULT */
        count = __put_user(ibox_data, udata);
        if (count)
-               goto out;
+               goto out_unlock;
 
        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
@@ -782,9 +834,9 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
                        break;
        }
 
-out:
+out_unlock:
        spu_release(ctx);
-
+out:
        return count;
 }
 
@@ -899,7 +951,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 
        count = spu_acquire(ctx);
        if (count)
-               return count;
+               goto out;
 
        /*
         * make sure we can at least write one element, by waiting
@@ -907,14 +959,16 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
         */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
-               if (!spu_wbox_write(ctx, wbox_data))
+               if (!spu_wbox_write(ctx, wbox_data)) {
                        count = -EAGAIN;
+                       goto out_unlock;
+               }
        } else {
                count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+               if (count)
+                       goto out;
        }
 
-       if (count)
-               goto out;
 
        /* write as much as possible */
        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
@@ -928,8 +982,9 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
                        break;
        }
 
-out:
+out_unlock:
        spu_release(ctx);
+out:
        return count;
 }
 
@@ -1074,23 +1129,23 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
        return 4;
 }
 
-static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
-                                             unsigned long address)
+static int
+spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-#if PAGE_SIZE == 0x1000
-       return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
-#elif PAGE_SIZE == 0x10000
+#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
+       return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
+#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
-       return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
+       return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
 }
 
-static struct vm_operations_struct spufs_signal1_mmap_vmops = {
-       .nopfn = spufs_signal1_mmap_nopfn,
+static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
+       .fault = spufs_signal1_mmap_fault,
 };
 
 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1099,8 +1154,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
@@ -1211,23 +1265,23 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 }
 
 #if SPUFS_MMAP_4K
-static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
-                                             unsigned long address)
+static int
+spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-#if PAGE_SIZE == 0x1000
-       return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
-#elif PAGE_SIZE == 0x10000
+#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
+       return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
+#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
-       return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
+       return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
 }
 
-static struct vm_operations_struct spufs_signal2_mmap_vmops = {
-       .nopfn = spufs_signal2_mmap_nopfn,
+static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
+       .fault = spufs_signal2_mmap_fault,
 };
 
 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1236,8 +1290,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
@@ -1314,7 +1367,7 @@ static u64 spufs_signal1_type_get(struct spu_context *ctx)
        return ctx->ops->signal1_type_get(ctx);
 }
 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
-                      spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
+                      spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
 
 
 static int spufs_signal2_type_set(void *data, u64 val)
@@ -1336,17 +1389,17 @@ static u64 spufs_signal2_type_get(struct spu_context *ctx)
        return ctx->ops->signal2_type_get(ctx);
 }
 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
-                      spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
+                      spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
 
 #if SPUFS_MMAP_4K
-static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
-                                         unsigned long address)
+static int
+spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
+       return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_mss_mmap_vmops = {
-       .nopfn = spufs_mss_mmap_nopfn,
+static const struct vm_operations_struct spufs_mss_mmap_vmops = {
+       .fault = spufs_mss_mmap_fault,
 };
 
 /*
@@ -1358,8 +1411,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
@@ -1401,14 +1453,14 @@ static const struct file_operations spufs_mss_fops = {
        .mmap    = spufs_mss_mmap,
 };
 
-static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
-                                           unsigned long address)
+static int
+spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
+       return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_psmap_mmap_vmops = {
-       .nopfn = spufs_psmap_mmap_nopfn,
+static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
+       .fault = spufs_psmap_mmap_fault,
 };
 
 /*
@@ -1420,8 +1472,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_psmap_mmap_vmops;
        return 0;
@@ -1461,14 +1512,14 @@ static const struct file_operations spufs_psmap_fops = {
 
 
 #if SPUFS_MMAP_4K
-static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
-                                         unsigned long address)
+static int
+spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
+       return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
 }
 
-static struct vm_operations_struct spufs_mfc_mmap_vmops = {
-       .nopfn = spufs_mfc_mmap_nopfn,
+static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
+       .fault = spufs_mfc_mmap_fault,
 };
 
 /*
@@ -1480,8 +1531,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP;
-       vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                    | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
@@ -1533,7 +1583,7 @@ void spufs_mfc_callback(struct spu *spu)
 
        wake_up_all(&ctx->mfc_wq);
 
-       pr_debug("%s %s\n", __FUNCTION__, spu->name);
+       pr_debug("%s %s\n", __func__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;
@@ -1592,12 +1642,11 @@ static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
        } else {
                ret = spufs_wait(ctx->mfc_wq,
                           spufs_read_mfc_tagstatus(ctx, &status));
+               if (ret)
+                       goto out;
        }
        spu_release(ctx);
 
-       if (ret)
-               goto out;
-
        ret = 4;
        if (copy_to_user(buffer, &status, 4))
                ret = -EFAULT;
@@ -1608,7 +1657,7 @@ out:
 
 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
 {
-       pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
+       pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
 
        switch (cmd->cmd) {
@@ -1625,7 +1674,7 @@ static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
        }
 
        if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
-               pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
+               pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
                                cmd->ea, cmd->lsa);
                return -EIO;
        }
@@ -1726,6 +1775,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
+               if (ret)
+                       goto out;
                if (status)
                        ret = status;
        }
@@ -1766,7 +1817,7 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;
 
-       pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
+       pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
                free_elements, tagstatus, ctx->tagwait);
 
        return mask;
@@ -1779,7 +1830,7 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
 
        ret = spu_acquire(ctx);
        if (ret)
-               return ret;
+               goto out;
 #if 0
 /* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
@@ -1788,12 +1839,13 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
-out:
+       if (ret)
+               goto out;
 #else
        ret = 0;
 #endif
        spu_release(ctx);
-
+out:
        return ret;
 }
 
@@ -2026,13 +2078,13 @@ static const struct file_operations spufs_caps_fops = {
 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
 {
-       u32 mbox_stat;
        u32 data;
 
-       mbox_stat = ctx->csa.prob.mb_stat_R;
-       if (mbox_stat & 0x0000ff) {
-               data = ctx->csa.prob.pu_mb_R;
-       }
+       /* EOF if there's no entry in the mbox */
+       if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
+               return 0;
+
+       data = ctx->csa.prob.pu_mb_R;
 
        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
 }
@@ -2066,13 +2118,13 @@ static const struct file_operations spufs_mbox_info_fops = {
 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
                                char __user *buf, size_t len, loff_t *pos)
 {
-       u32 ibox_stat;
        u32 data;
 
-       ibox_stat = ctx->csa.prob.mb_stat_R;
-       if (ibox_stat & 0xff0000) {
-               data = ctx->csa.priv2.puint_mb_R;
-       }
+       /* EOF if there's no entry in the ibox */
+       if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
+               return 0;
+
+       data = ctx->csa.priv2.puint_mb_R;
 
        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
 }
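
With the rewrite above, __spufs_mbox_info_read() and __spufs_ibox_info_read() return 0 (EOF) themselves when the saved mailbox status shows no entry, and leave all *pos handling to simple_read_from_buffer(). A minimal sketch of that helper with a hypothetical 4-byte datum:

#include <linux/fs.h>

static ssize_t datum_read(struct file *file, char __user *buf,
			  size_t len, loff_t *pos)
{
	u32 data = 42;	/* stand-in for the saved register value */

	/* copes with *pos, short reads, and EOF once *pos >= sizeof(data) */
	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
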
@@ -2362,23 +2414,276 @@ static const struct file_operations spufs_stat_fops = {
        .release        = single_release,
 };
 
+static inline int spufs_switch_log_used(struct spu_context *ctx)
+{
+       return (ctx->switch_log->head - ctx->switch_log->tail) %
+               SWITCH_LOG_BUFSIZE;
+}
+
+static inline int spufs_switch_log_avail(struct spu_context *ctx)
+{
+       return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
+}
+
+static int spufs_switch_log_open(struct inode *inode, struct file *file)
+{
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       int rc;
+
+       rc = spu_acquire(ctx);
+       if (rc)
+               return rc;
+
+       if (ctx->switch_log) {
+               rc = -EBUSY;
+               goto out;
+       }
+
+       ctx->switch_log = kmalloc(sizeof(struct switch_log) +
+               SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+               GFP_KERNEL);
+
+       if (!ctx->switch_log) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       ctx->switch_log->head = ctx->switch_log->tail = 0;
+       init_waitqueue_head(&ctx->switch_log->wait);
+       rc = 0;
+
+out:
+       spu_release(ctx);
+       return rc;
+}
+
+static int spufs_switch_log_release(struct inode *inode, struct file *file)
+{
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       int rc;
+
+       rc = spu_acquire(ctx);
+       if (rc)
+               return rc;
+
+       kfree(ctx->switch_log);
+       ctx->switch_log = NULL;
+       spu_release(ctx);
+
+       return 0;
+}
+
+static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
+{
+       struct switch_log_entry *p;
+
+       p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
+
+       return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
+                       (unsigned int) p->tstamp.tv_sec,
+                       (unsigned int) p->tstamp.tv_nsec,
+                       p->spu_id,
+                       (unsigned int) p->type,
+                       (unsigned int) p->val,
+                       (unsigned long long) p->timebase);
+}
+
+static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
+                            size_t len, loff_t *ppos)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       int error = 0, cnt = 0;
+
+       if (!buf)
+               return -EINVAL;
+
+       error = spu_acquire(ctx);
+       if (error)
+               return error;
+
+       while (cnt < len) {
+               char tbuf[128];
+               int width;
+
+               if (spufs_switch_log_used(ctx) == 0) {
+                       if (cnt > 0) {
+                               /* If there's data ready to go, we can
+                                * just return straight away */
+                               break;
+
+                       } else if (file->f_flags & O_NONBLOCK) {
+                               error = -EAGAIN;
+                               break;
+
+                       } else {
+                               /* spufs_wait will drop the mutex and
+                                * re-acquire, but since we're in read(), the
+                                * file cannot be _released (and so
+                                * ctx->switch_log is stable).
+                                */
+                               error = spufs_wait(ctx->switch_log->wait,
+                                               spufs_switch_log_used(ctx) > 0);
+
+                               /* On error, spufs_wait returns without the
+                                * state mutex held */
+                               if (error)
+                                       return error;
+
+                               /* We may have had entries read from underneath
+                                * us while we dropped the mutex in spufs_wait,
+                                * so re-check */
+                               if (spufs_switch_log_used(ctx) == 0)
+                                       continue;
+                       }
+               }
+
+               width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
+               if (width < len)
+                       ctx->switch_log->tail =
+                               (ctx->switch_log->tail + 1) %
+                                SWITCH_LOG_BUFSIZE;
+               else
+                       /* If the record is greater than space available return
+                        * partial buffer (so far) */
+                       break;
+
+               error = copy_to_user(buf + cnt, tbuf, width);
+               if (error)
+                       break;
+               cnt += width;
+       }
+
+       spu_release(ctx);
+
+       return cnt == 0 ? error : cnt;
+}
+
+static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       unsigned int mask = 0;
+       int rc;
+
+       poll_wait(file, &ctx->switch_log->wait, wait);
+
+       rc = spu_acquire(ctx);
+       if (rc)
+               return rc;
+
+       if (spufs_switch_log_used(ctx) > 0)
+               mask |= POLLIN;
+
+       spu_release(ctx);
+
+       return mask;
+}
+
+static const struct file_operations spufs_switch_log_fops = {
+       .owner          = THIS_MODULE,
+       .open           = spufs_switch_log_open,
+       .read           = spufs_switch_log_read,
+       .poll           = spufs_switch_log_poll,
+       .release        = spufs_switch_log_release,
+};
+
+/**
+ * Log a context switch event to a switch log reader.
+ *
+ * Must be called with ctx->state_mutex held.
+ */
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+               u32 type, u32 val)
+{
+       if (!ctx->switch_log)
+               return;
+
+       if (spufs_switch_log_avail(ctx) > 1) {
+               struct switch_log_entry *p;
+
+               p = ctx->switch_log->log + ctx->switch_log->head;
+               ktime_get_ts(&p->tstamp);
+               p->timebase = get_tb();
+               p->spu_id = spu ? spu->number : -1;
+               p->type = type;
+               p->val = val;
+
+               ctx->switch_log->head =
+                       (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
+       }
+
+       wake_up(&ctx->switch_log->wait);
+}
+
+static int spufs_show_ctx(struct seq_file *s, void *private)
+{
+       struct spu_context *ctx = s->private;
+       u64 mfc_control_RW;
+
+       mutex_lock(&ctx->state_mutex);
+       if (ctx->spu) {
+               struct spu *spu = ctx->spu;
+               struct spu_priv2 __iomem *priv2 = spu->priv2;
 
-struct tree_descr spufs_dir_contents[] = {
+               spin_lock_irq(&spu->register_lock);
+               mfc_control_RW = in_be64(&priv2->mfc_control_RW);
+               spin_unlock_irq(&spu->register_lock);
+       } else {
+               struct spu_state *csa = &ctx->csa;
+
+               mfc_control_RW = csa->priv2.mfc_control_RW;
+       }
+
+       seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
+               " %c %llx %llx %llx %llx %x %x\n",
+               ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
+               ctx->flags,
+               ctx->sched_flags,
+               ctx->prio,
+               ctx->time_slice,
+               ctx->spu ? ctx->spu->number : -1,
+               !list_empty(&ctx->rq) ? 'q' : ' ',
+               ctx->csa.class_0_pending,
+               ctx->csa.class_0_dar,
+               ctx->csa.class_1_dsisr,
+               mfc_control_RW,
+               ctx->ops->runcntl_read(ctx),
+               ctx->ops->status_read(ctx));
+
+       mutex_unlock(&ctx->state_mutex);
+
+       return 0;
+}
+
+static int spufs_ctx_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_ctx_fops = {
+       .open           = spufs_ctx_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+const struct spufs_tree_descr spufs_dir_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
-       { "mem",  &spufs_mem_fops,  0666, },
-       { "regs", &spufs_regs_fops,  0666, },
+       { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
+       { "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
-       { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
-       { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
-       { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+       { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
+       { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
+       { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
-       { "fpcr", &spufs_fpcr_fops, 0666, },
+       { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
@@ -2388,28 +2693,31 @@ struct tree_descr spufs_dir_contents[] = {
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
-       { "psmap", &spufs_psmap_fops, 0666, },
+       { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
-       { "mbox_info", &spufs_mbox_info_fops, 0444, },
-       { "ibox_info", &spufs_ibox_info_fops, 0444, },
-       { "wbox_info", &spufs_wbox_info_fops, 0444, },
-       { "dma_info", &spufs_dma_info_fops, 0444, },
-       { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+       { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
+       { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
+       { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
+       { "dma_info", &spufs_dma_info_fops, 0444,
+               sizeof(struct spu_dma_info), },
+       { "proxydma_info", &spufs_proxydma_info_fops, 0444,
+               sizeof(struct spu_proxydma_info)},
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
+       { "switch_log", &spufs_switch_log_fops, 0444 },
        {},
 };
 
-struct tree_descr spufs_dir_nosched_contents[] = {
+const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
-       { "mem",  &spufs_mem_fops,  0666, },
+       { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
-       { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
-       { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
-       { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+       { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
+       { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
+       { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_nosched_fops, 0222, },
        { "signal2", &spufs_signal2_nosched_fops, 0222, },
        { "signal1_type", &spufs_signal1_type, 0666, },
@@ -2418,7 +2726,7 @@ struct tree_descr spufs_dir_nosched_contents[] = {
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
-       { "psmap", &spufs_psmap_fops, 0666, },
+       { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "tid", &spufs_tid_fops, 0444, },
@@ -2426,7 +2734,12 @@ struct tree_descr spufs_dir_nosched_contents[] = {
        {},
 };
 
-struct spufs_coredump_reader spufs_coredump_read[] = {
+const struct spufs_tree_descr spufs_dir_debug_contents[] = {
+       { ".ctx", &spufs_ctx_fops, 0444, },
+       {},
+};
+
+const struct spufs_coredump_reader spufs_coredump_read[] = {
        { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
        { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
        { "lslr", NULL, spufs_lslr_get, 19 },