sgi-xp: xpc_partition.c — route sn2-specific operations through xpc_arch_ops and carry physical addresses as unsigned long
[safe/jmp/linux-2.6] / drivers / misc / sgi-xp / xpc_partition.c
index ca6784f..65877bc 100644 (file)
  *
  */
 
-#include <linux/kernel.h>
-#include <linux/sysctl.h>
-#include <linux/cache.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/addrs.h>
+#include <linux/device.h>
+#include <linux/hardirq.h>
 #include "xpc.h"
 
 /* XPC is exiting flag */
@@ -67,56 +60,61 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
  * Given a nasid, get the physical address of the  partition's reserved page
  * for that nasid. This function returns 0 on any error.
  */
-static u64
+static unsigned long
 xpc_get_rsvd_page_pa(int nasid)
 {
        enum xp_retval ret;
-       s64 status;
        u64 cookie = 0;
-       u64 rp_pa = nasid;      /* seed with nasid */
-       u64 len = 0;
-       u64 buf = buf;
-       u64 buf_len = 0;
+       unsigned long rp_pa = nasid;    /* seed with nasid */
+       size_t len = 0;
+       size_t buf_len = 0;
+       void *buf = buf;
        void *buf_base = NULL;
+       enum xp_retval (*get_partition_rsvd_page_pa)
+               (void *, u64 *, unsigned long *, size_t *) =
+               xpc_arch_ops.get_partition_rsvd_page_pa;
 
        while (1) {
 
-               status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
-                                                      &len);
+               /* !!! rp_pa will need to be _gpa on UV.
+                * ??? So do we save it into the architecture specific parts
+                * ??? of the xpc_partition structure? Do we rename this
+                * ??? function or have two versions? Rename rp_pa for UV to
+                * ??? rp_gpa?
+                */
+               ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
 
-               dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
-                       "0x%016lx, address=0x%016lx, len=0x%016lx\n",
-                       status, cookie, rp_pa, len);
+               dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
+                       "address=0x%016lx, len=0x%016lx\n", ret,
+                       (unsigned long)cookie, rp_pa, len);
 
-               if (status != SALRET_MORE_PASSES)
+               if (ret != xpNeedMoreInfo)
                        break;
 
                /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
                if (L1_CACHE_ALIGN(len) > buf_len) {
                        kfree(buf_base);
                        buf_len = L1_CACHE_ALIGN(len);
-                       buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
-                                                                GFP_KERNEL,
-                                                                &buf_base);
+                       buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
+                                                           &buf_base);
                        if (buf_base == NULL) {
                                dev_err(xpc_part, "unable to kmalloc "
                                        "len=0x%016lx\n", buf_len);
-                               status = SALRET_ERROR;
+                               ret = xpNoMemory;
                                break;
                        }
                }
 
-               ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len);
+               ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
                if (ret != xpSuccess) {
                        dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
-                       status = SALRET_ERROR;
                        break;
                }
        }
 
        kfree(buf_base);
 
-       if (status != SALRET_OK)
+       if (ret != xpSuccess)
                rp_pa = 0;
 
        dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
@@ -128,21 +126,22 @@ xpc_get_rsvd_page_pa(int nasid)
  * other partitions to discover we are alive and establish initial
  * communications.
  */
-struct xpc_rsvd_page *
+int
 xpc_setup_rsvd_page(void)
 {
+       int ret;
        struct xpc_rsvd_page *rp;
-       u64 rp_pa;
-       unsigned long new_stamp;
+       unsigned long rp_pa;
+       unsigned long new_ts_jiffies;
 
        /* get the local reserved page's address */
 
        preempt_disable();
-       rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
+       rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
        preempt_enable();
        if (rp_pa == 0) {
                dev_err(xpc_part, "SAL failed to locate the reserved page\n");
-               return NULL;
+               return -ESRCH;
        }
        rp = (struct xpc_rsvd_page *)__va(rp_pa);
 
@@ -150,13 +149,13 @@ xpc_setup_rsvd_page(void)
                /* SAL_versions < 3 had a SAL_partid defined as a u8 */
                rp->SAL_partid &= 0xff;
        }
-       BUG_ON(rp->SAL_partid != sn_partition_id);
+       BUG_ON(rp->SAL_partid != xp_partition_id);
 
        if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
                dev_err(xpc_part, "the reserved page's partid of %d is outside "
                        "supported range (< 0 || >= %d)\n", rp->SAL_partid,
                        xp_max_npartitions);
-               return NULL;
+               return -EINVAL;
        }
 
        rp->version = XPC_RP_VERSION;
@@ -175,20 +174,29 @@ xpc_setup_rsvd_page(void)
        xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
        xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
 
-       if (xpc_rsvd_page_init(rp) != xpSuccess)
-               return NULL;
+       ret = xpc_arch_ops.setup_rsvd_page(rp);
+       if (ret != 0)
+               return ret;
 
        /*
         * Set timestamp of when reserved page was setup by XPC.
         * This signifies to the remote partition that our reserved
         * page is initialized.
         */
-       new_stamp = jiffies;
-       if (new_stamp == 0 || new_stamp == rp->stamp)
-               new_stamp++;
-       rp->stamp = new_stamp;
+       new_ts_jiffies = jiffies;
+       if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
+               new_ts_jiffies++;
+       rp->ts_jiffies = new_ts_jiffies;
+
+       xpc_rsvd_page = rp;
+       return 0;
+}
 
-       return rp;
+void
+xpc_teardown_rsvd_page(void)
+{
+       /* a zero timestamp indicates our rsvd page is not initialized */
+       xpc_rsvd_page->ts_jiffies = 0;
 }
 
 /*
@@ -200,7 +208,7 @@ xpc_setup_rsvd_page(void)
  */
 enum xp_retval
 xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
-                 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
+                 struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
 {
        int l;
        enum xp_retval ret;
@@ -212,7 +220,7 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
                return xpNoRsvdPageAddr;
 
        /* pull over the reserved page header and part_nasids mask */
-       ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa,
+       ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
                               XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
        if (ret != xpSuccess)
                return ret;
@@ -225,8 +233,8 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
                        discovered_nasids[l] |= remote_part_nasids[l];
        }
 
-       /* see if the reserved page has been set up by XPC */
-       if (remote_rp->stamp == 0)
+       /* zero timestamp indicates the reserved page has not been setup */
+       if (remote_rp->ts_jiffies == 0)
                return xpRsvdPageNotSet;
 
        if (XPC_VERSION_MAJOR(remote_rp->version) !=
@@ -237,11 +245,11 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
        /* check that both remote and local partids are valid for each side */
        if (remote_rp->SAL_partid < 0 ||
            remote_rp->SAL_partid >= xp_max_npartitions ||
-           remote_rp->max_npartitions <= sn_partition_id) {
+           remote_rp->max_npartitions <= xp_partition_id) {
                return xpInvalidPartid;
        }
 
-       if (remote_rp->SAL_partid == sn_partition_id)
+       if (remote_rp->SAL_partid == xp_partition_id)
                return xpLocalPartid;
 
        return xpSuccess;
@@ -258,7 +266,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
        short partid = XPC_PARTID(part);
        int disengaged;
 
-       disengaged = !xpc_partition_engaged(partid);
+       disengaged = !xpc_arch_ops.partition_engaged(partid);
        if (part->disengage_timeout) {
                if (!disengaged) {
                        if (time_is_after_jiffies(part->disengage_timeout)) {
@@ -274,7 +282,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
                        dev_info(xpc_part, "deactivate request to remote "
                                 "partition %d timed out\n", partid);
                        xpc_disengage_timedout = 1;
-                       xpc_assume_partition_disengaged(partid);
+                       xpc_arch_ops.assume_partition_disengaged(partid);
                        disengaged = 1;
                }
                part->disengage_timeout = 0;
@@ -283,12 +291,12 @@ xpc_partition_disengaged(struct xpc_partition *part)
                if (!in_interrupt())
                        del_singleshot_timer_sync(&part->disengage_timer);
 
-               DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
-                       part->act_state != XPC_P_INACTIVE);
-               if (part->act_state != XPC_P_INACTIVE)
+               DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
+                       part->act_state != XPC_P_AS_INACTIVE);
+               if (part->act_state != XPC_P_AS_INACTIVE)
                        xpc_wakeup_channel_mgr(part);
 
-               xpc_cancel_partition_deactivation_request(part);
+               xpc_arch_ops.cancel_partition_deactivation_request(part);
        }
        return disengaged;
 }
@@ -305,8 +313,8 @@ xpc_mark_partition_active(struct xpc_partition *part)
        dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
 
        spin_lock_irqsave(&part->act_lock, irq_flags);
-       if (part->act_state == XPC_P_ACTIVATING) {
-               part->act_state = XPC_P_ACTIVE;
+       if (part->act_state == XPC_P_AS_ACTIVATING) {
+               part->act_state = XPC_P_AS_ACTIVE;
                ret = xpSuccess;
        } else {
                DBUG_ON(part->reason == xpSuccess);
@@ -328,16 +336,16 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 
        spin_lock_irqsave(&part->act_lock, irq_flags);
 
-       if (part->act_state == XPC_P_INACTIVE) {
+       if (part->act_state == XPC_P_AS_INACTIVE) {
                XPC_SET_REASON(part, reason, line);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                if (reason == xpReactivating) {
                        /* we interrupt ourselves to reactivate partition */
-                       xpc_request_partition_reactivation(part);
+                       xpc_arch_ops.request_partition_reactivation(part);
                }
                return;
        }
-       if (part->act_state == XPC_P_DEACTIVATING) {
+       if (part->act_state == XPC_P_AS_DEACTIVATING) {
                if ((part->reason == xpUnloading && reason != xpUnloading) ||
                    reason == xpReactivating) {
                        XPC_SET_REASON(part, reason, line);
@@ -346,13 +354,13 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
                return;
        }
 
-       part->act_state = XPC_P_DEACTIVATING;
+       part->act_state = XPC_P_AS_DEACTIVATING;
        XPC_SET_REASON(part, reason, line);
 
        spin_unlock_irqrestore(&part->act_lock, irq_flags);
 
        /* ask remote partition to deactivate with regard to us */
-       xpc_request_partition_deactivation(part);
+       xpc_arch_ops.request_partition_deactivation(part);
 
        /* set a timelimit on the disengage phase of the deactivation request */
        part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
@@ -377,7 +385,7 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
                XPC_PARTID(part));
 
        spin_lock_irqsave(&part->act_lock, irq_flags);
-       part->act_state = XPC_P_INACTIVE;
+       part->act_state = XPC_P_AS_INACTIVE;
        spin_unlock_irqrestore(&part->act_lock, irq_flags);
        part->remote_rp_pa = 0;
 }
@@ -396,7 +404,7 @@ xpc_discovery(void)
 {
        void *remote_rp_base;
        struct xpc_rsvd_page *remote_rp;
-       u64 remote_rp_pa;
+       unsigned long remote_rp_pa;
        int region;
        int region_size;
        int max_regions;
@@ -426,7 +434,7 @@ xpc_discovery(void)
         * protection is in regards to memory, IOI and IPI.
         */
        max_regions = 64;
-       region_size = sn_region_size;
+       region_size = xp_region_size;
 
        switch (region_size) {
        case 128:
@@ -490,7 +498,7 @@ xpc_discovery(void)
                                continue;
                        }
 
-                       xpc_request_partition_activation(remote_rp,
+                       xpc_arch_ops.request_partition_activation(remote_rp,
                                                         remote_rp_pa, nasid);
                }
        }
@@ -507,7 +515,7 @@ enum xp_retval
 xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
 {
        struct xpc_partition *part;
-       u64 part_nasid_pa;
+       unsigned long part_nasid_pa;
 
        part = &xpc_partitions[partid];
        if (part->remote_rp_pa == 0)
@@ -515,8 +523,8 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
 
        memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
 
-       part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
+       part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-       return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa,
+       return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
                                xpc_nasid_mask_nbytes);
 }