include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...

diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 644532c..39a472e 100644
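
The first hunk below adds an explicit #include <linux/slab.h>, in line with the cleanup named in the subject: a file that calls the slab allocators should include slab.h itself rather than rely on another header pulling it in indirectly. A minimal sketch of the failure mode being avoided; the struct and function names here are made up for illustration and are not from this file:

	#include <linux/types.h>
	#include <linux/slab.h>		/* kzalloc()/kfree() are declared here */

	struct example_priv {		/* hypothetical private data */
		u64 id;
	};

	static struct example_priv *example_alloc(void)
	{
		/*
		 * This builds only because <linux/slab.h> is included
		 * explicitly above; once the implicit inclusion is broken,
		 * files that relied on it stop compiling.
		 */
		return kzalloc(sizeof(struct example_priv), GFP_KERNEL);
	}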
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
-#include <asm/ps3.h>
 #include <asm/lv1call.h>
+#include <asm/ps3.h>
+
+#include "../cell/spufs/spufs.h"
+#include "platform.h"
 
 /* spu_management_ops */
 
@@ -50,7 +54,7 @@ enum spe_type {
  */
 
 struct spe_shadow {
-       u8 padding_0000[0x0140];
+       u8 padding_0140[0x0140];
        u64 int_status_class0_RW;       /* 0x0140 */
        u64 int_status_class1_RW;       /* 0x0148 */
        u64 int_status_class2_RW;       /* 0x0150 */
@@ -67,8 +71,7 @@ struct spe_shadow {
        u8 padding_0c08[0x0f00-0x0c08];
        u64 spe_execution_status;       /* 0x0f00 */
        u8 padding_0f08[0x1000-0x0f08];
-} __attribute__ ((packed));
-
+};
 
 /**
  * enum spe_ex_state - Logical spe execution state.
@@ -139,9 +142,15 @@ static void _dump_areas(unsigned int spe_id, unsigned long priv2,
        pr_debug("%s:%d: shadow:  %lxh\n", func, line, shadow);
 }
 
+inline u64 ps3_get_spe_id(void *arg)
+{
+       return spu_pdata(arg)->spe_id;
+}
+EXPORT_SYMBOL_GPL(ps3_get_spe_id);
+
 static unsigned long get_vas_id(void)
 {
-       unsigned long id;
+       u64 id;
 
        lv1_get_logical_ppe_id(&id);
        lv1_get_virtual_address_space_id_of_ppe(id, &id);
@@ -152,14 +161,18 @@ static unsigned long get_vas_id(void)
 static int __init construct_spu(struct spu *spu)
 {
        int result;
-       unsigned long unused;
+       u64 unused;
+       u64 problem_phys;
+       u64 local_store_phys;
 
        result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
                PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
-               &spu_pdata(spu)->priv2_addr, &spu->problem_phys,
-               &spu->local_store_phys, &unused,
+               &spu_pdata(spu)->priv2_addr, &problem_phys,
+               &local_store_phys, &unused,
                &spu_pdata(spu)->shadow_addr,
                &spu_pdata(spu)->spe_id);
+       spu->problem_phys = problem_phys;
+       spu->local_store_phys = local_store_phys;
 
        if (result) {
                pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
@@ -170,31 +183,6 @@ static int __init construct_spu(struct spu *spu)
        return result;
 }
 
-static int __init add_spu_pages(unsigned long start_addr, unsigned long size)
-{
-       int result;
-       unsigned long start_pfn;
-       unsigned long nr_pages;
-       struct pglist_data *pgdata;
-       struct zone *zone;
-
-       BUG_ON(!mem_init_done);
-
-       start_pfn = start_addr >> PAGE_SHIFT;
-       nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       pgdata = NODE_DATA(0);
-       zone = pgdata->node_zones;
-
-       result = __add_pages(zone, start_pfn, nr_pages);
-
-       if (result)
-               pr_debug("%s:%d: __add_pages failed: (%d)\n",
-                       __func__, __LINE__, result);
-
-       return result;
-}
-
 static void spu_unmap(struct spu *spu)
 {
        iounmap(spu->priv2);
@@ -203,32 +191,32 @@ static void spu_unmap(struct spu *spu)
        iounmap(spu_pdata(spu)->shadow);
 }
 
+/**
+ * setup_areas - Map the spu regions into the address space.
+ *
+ * The current HV requires the spu shadow regs to be mapped with the
+ * PTE page protection bits set as read-only (PP=3).  This implementation
+ * uses the low level __ioremap() to bypass the page protection settings
+ * inforced by ioremap_flags() to get the needed PTE bits set for the
+ * shadow regs.
+ */
+
 static int __init setup_areas(struct spu *spu)
 {
        struct table {char* name; unsigned long addr; unsigned long size;};
-       int result;
-
-       /* setup pages */
-
-       result = add_spu_pages(spu->local_store_phys, LS_SIZE);
-       if (result)
-               goto fail_add;
+       static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;
 
-       result = add_spu_pages(spu->problem_phys, sizeof(struct spu_problem));
-       if (result)
-               goto fail_add;
-
-       /* ioremap */
-
-       spu_pdata(spu)->shadow = __ioremap(
-               spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
-               PAGE_READONLY | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
+                                          sizeof(struct spe_shadow),
+                                          shadow_flags);
        if (!spu_pdata(spu)->shadow) {
                pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
                goto fail_ioremap;
        }
 
-       spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
+       spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
+               LS_SIZE, _PAGE_NO_CACHE);
+
        if (!spu->local_store) {
                pr_debug("%s:%d: ioremap local_store failed\n",
                        __func__, __LINE__);
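
The setup_areas() comment in the hunk above notes that the HV requires the shadow register area to be mapped read-only (PP=3), which is why the code uses the low-level __ioremap() with _PAGE_NO_CACHE | 3 instead of ioremap_flags(). A sketch of how such a read-only, uncached shadow mapping is typically consumed, assuming the spe_shadow layout shown earlier; the helper name is illustrative and not part of this patch:

	#include <asm/io.h>

	/* Illustrative only: read an interrupt status word through the
	 * read-only shadow mapping created in setup_areas().
	 */
	static u64 example_shadow_int_status0(struct spu *spu)
	{
		struct spe_shadow __iomem *shadow = spu_pdata(spu)->shadow;

		return in_be64(&shadow->int_status_class0_RW);
	}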
@@ -237,6 +225,7 @@ static int __init setup_areas(struct spu *spu)
 
        spu->problem = ioremap(spu->problem_phys,
                sizeof(struct spu_problem));
+
        if (!spu->problem) {
                pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
                goto fail_ioremap;
@@ -244,6 +233,7 @@ static int __init setup_areas(struct spu *spu)
 
        spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
                sizeof(struct spu_priv2));
+
        if (!spu->priv2) {
                pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
                goto fail_ioremap;
@@ -260,28 +250,28 @@ static int __init setup_areas(struct spu *spu)
 
 fail_ioremap:
        spu_unmap(spu);
-fail_add:
-       return result;
+
+       return -ENOMEM;
 }
 
 static int __init setup_interrupts(struct spu *spu)
 {
        int result;
 
-       result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 0,
-               &spu->irqs[0]);
+       result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
+               0, &spu->irqs[0]);
 
        if (result)
                goto fail_alloc_0;
 
-       result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 1,
-               &spu->irqs[1]);
+       result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
+               1, &spu->irqs[1]);
 
        if (result)
                goto fail_alloc_1;
 
-       result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 2,
-               &spu->irqs[2]);
+       result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
+               2, &spu->irqs[2]);
 
        if (result)
                goto fail_alloc_2;
@@ -289,9 +279,9 @@ static int __init setup_interrupts(struct spu *spu)
        return result;
 
 fail_alloc_2:
-       ps3_free_spe_irq(spu->irqs[1]);
+       ps3_spe_irq_destroy(spu->irqs[1]);
 fail_alloc_1:
-       ps3_free_spe_irq(spu->irqs[0]);
+       ps3_spe_irq_destroy(spu->irqs[0]);
 fail_alloc_0:
        spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
        return result;
@@ -339,9 +329,9 @@ static int ps3_destroy_spu(struct spu *spu)
        result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
        BUG_ON(result);
 
-       ps3_free_spe_irq(spu->irqs[2]);
-       ps3_free_spe_irq(spu->irqs[1]);
-       ps3_free_spe_irq(spu->irqs[0]);
+       ps3_spe_irq_destroy(spu->irqs[2]);
+       ps3_spe_irq_destroy(spu->irqs[1]);
+       ps3_spe_irq_destroy(spu->irqs[0]);
 
        spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
 
@@ -438,17 +428,49 @@ static int __init ps3_enumerate_spus(int (*fn)(void *data))
                }
        }
 
-       if (result)
+       if (result) {
                printk(KERN_WARNING "%s:%d: Error initializing spus\n",
                        __func__, __LINE__);
+               return result;
+       }
 
-       return result;
+       return num_resource_id;
+}
+
+static int ps3_init_affinity(void)
+{
+       return 0;
+}
+
+/**
+ * ps3_enable_spu - Enable SPU run control.
+ *
+ * An outstanding enhancement for the PS3 would be to add a guard to check
+ * for incorrect access to the spu problem state when the spu context is
+ * disabled.  This check could be implemented with a flag added to the spu
+ * context that would inhibit mapping problem state pages, and a routine
+ * to unmap spu problem state pages.  When the spu is enabled with
+ * ps3_enable_spu() the flag would be set, allowing pages to be mapped,
+ * and when the spu is disabled with ps3_disable_spu() the flag would be
+ * cleared and the mapped problem state pages would be unmapped.
+ */
+
+static void ps3_enable_spu(struct spu_context *ctx)
+{
+}
+
+static void ps3_disable_spu(struct spu_context *ctx)
+{
+       ctx->ops->runcntl_stop(ctx);
 }
 
 const struct spu_management_ops spu_management_ps3_ops = {
        .enumerate_spus = ps3_enumerate_spus,
        .create_spu = ps3_create_spu,
        .destroy_spu = ps3_destroy_spu,
+       .enable_spu = ps3_enable_spu,
+       .disable_spu = ps3_disable_spu,
+       .init_affinity = ps3_init_affinity,
 };
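
The comment above ps3_enable_spu() describes a possible guard against touching spu problem state while the context is disabled; this patch only adds the empty enable hook and a disable hook that stops run control. A rough illustration of the guard idea, with every name below hypothetical and nothing taken from the patch:

	/*
	 * Hypothetical sketch of the guard described in the comment: a flag
	 * gates problem-state mappings; enable sets it, disable clears it
	 * and would also unmap any problem state pages already mapped.
	 */
	struct example_guarded_ctx {
		struct spu_context *ctx;
		bool mapping_allowed;		/* guard flag */
	};

	static void example_enable_spu(struct example_guarded_ctx *g)
	{
		g->mapping_allowed = true;	/* problem state may be mapped */
	}

	static void example_disable_spu(struct example_guarded_ctx *g)
	{
		g->mapping_allowed = false;	/* inhibit new mappings */
		/* unmapping of already-mapped problem state pages would go here */
		g->ctx->ops->runcntl_stop(g->ctx);
	}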
 
 /* spu_priv1_ops */