gru: support instruction completion interrupts
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 61aa80a..3220e95 100644
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/security.h>
 #include <asm/pgtable.h>
 #include "gru.h"
 #include "grutables.h"
@@ -165,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
  * the GRU, atomic operations must be used to clear bits.
  */
 static void get_clear_fault_map(struct gru_state *gru,
-                               struct gru_tlb_fault_map *map)
+                               struct gru_tlb_fault_map *imap,
+                               struct gru_tlb_fault_map *dmap)
 {
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;
@@ -176,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
-               map->fault_bits[i] = k;
+               imap->fault_bits[i] = k;
+               k = tfm->done_bits[i];
+               if (k)
+                       k = xchg(&tfm->done_bits[i], 0UL);
+               dmap->fault_bits[i] = k;
        }
 
        /*
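The hunk above snapshots and clears the fault and done bitmaps with xchg(), so bits set by the GRU hardware between the read and the clear are never lost; the plain read first skips the atomic exchange in the common no-fault case. The same fetch-and-clear idiom in isolation (hypothetical helper, not driver code; xchg() is the kernel's atomic exchange):

	static unsigned long fetch_and_clear_bits(unsigned long *word)
	{
		unsigned long k = *word;	/* cheap peek first */

		if (k)				/* pay for the atomic only when bits are set */
			k = xchg(word, 0UL);
		return k;
	}
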
@@ -266,6 +272,44 @@ err:
        return 1;
 }
 
+static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
+                   int write, int atomic, unsigned long *gpa, int *pageshift)
+{
+       struct mm_struct *mm = gts->ts_mm;
+       struct vm_area_struct *vma;
+       unsigned long paddr;
+       int ret, ps;
+
+       vma = find_vma(mm, vaddr);
+       if (!vma)
+               goto inval;
+
+       /*
+        * Atomic lookup is faster & usually works even if called in non-atomic
+        * context.
+        */
+       rmb();  /* Must check ms_range_active before loading PTEs */
+       ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+       if (ret) {
+               if (atomic)
+                       goto upm;
+               if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+                       goto inval;
+       }
+       if (is_gru_paddr(paddr))
+               goto inval;
+       paddr = paddr & ~((1UL << ps) - 1);
+       *gpa = uv_soc_phys_ram_to_gpa(paddr);
+       *pageshift = ps;
+       return 0;
+
+inval:
+       return -1;
+upm:
+       return -2;
+}
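
gru_vtop() hides both lookup paths behind a small return convention: 0 on success, -1 for an invalid address, -2 when the fault must be retried from user context. Before the physical address is handed to the GRU it is masked down to the start of its page; a worked example of that mask with illustrative values:

	unsigned long ps = 14;			/* 16 KB page -> page shift of 14 */
	unsigned long paddr = 0x12345678UL;

	paddr &= ~((1UL << ps) - 1);		/* clears the low 14 bits: 0x12344000 */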
+
 /*
  * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
  *     Input:
@@ -280,10 +324,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          unsigned long __user *cb)
 {
-       struct mm_struct *mm = gts->ts_mm;
-       struct vm_area_struct *vma;
-       int pageshift, asid, write, ret;
-       unsigned long paddr, gpa, vaddr;
+       int pageshift = 0, asid, write, ret, atomic = !cb;
+       unsigned long gpa = 0, vaddr = 0;
 
        /*
         * NOTE: The GRU contains magic hardware that eliminates races between
@@ -297,6 +339,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
         * Might be a hardware race OR a stupid user. Ignore FMM because FMM
         * is a transient state.
         */
+       if (tfh->status != TFHSTATUS_EXCEPTION)
+               goto failnoexception;
        if (tfh->state == TFHSTATE_IDLE)
                goto failidle;
        if (tfh->state == TFHSTATE_MISS_FMM && cb)
@@ -317,28 +361,19 @@ static int gru_try_dropin(struct gru_thread_state *gts,
        if (atomic_read(&gts->ts_gms->ms_range_active))
                goto failactive;
 
-       vma = find_vma(mm, vaddr);
-       if (!vma)
+       ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+       if (ret == -1)
                goto failinval;
+       if (ret == -2)
+               goto failupm;
 
-       /*
-        * Atomic lookup is faster & usually works even if called in non-atomic
-        * context.
-        */
-       rmb();  /* Must/check ms_range_active before loading PTEs */
-       ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
-       if (ret) {
-               if (!cb)
+       if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+               gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+               if (atomic || !gru_update_cch(gts, 0)) {
+                       gts->ts_force_cch_reload = 1;
                        goto failupm;
-               if (non_atomic_pte_lookup(vma, vaddr, write, &paddr,
-                                         &pageshift))
-                       goto failinval;
+               }
        }
-       if (is_gru_paddr(paddr))
-               goto failinval;
-
-       paddr = paddr & ~((1UL << pageshift) - 1);
-       gpa = uv_soc_phys_ram_to_gpa(paddr);
        gru_cb_set_istatus_active(cb);
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
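
The rewritten dropin path also records each page size a context faults on in ts_sizeavail and pushes that mask into the context's CCH; when the update cannot be performed from the current context, ts_force_cch_reload defers it to the next entry from user context. A hypothetical sketch of the bookkeeping, assuming GRU_SIZEAVAIL() maps a page shift to a single bit:

	#include <linux/errno.h>

	#define SIZEAVAIL(shift)	(1UL << (shift))	/* stand-in for GRU_SIZEAVAIL() */

	/*
	 * Returns 0 if the dropin may proceed, -EAGAIN if it must be
	 * retried after the CCH is reloaded from user context.
	 */
	static int note_pagesize(unsigned long *sizeavail, int pageshift,
				 int atomic, int *force_cch_reload)
	{
		if (*sizeavail & SIZEAVAIL(pageshift))
			return 0;			/* size already advertised */
		*sizeavail |= SIZEAVAIL(pageshift);
		if (atomic) {
			*force_cch_reload = 1;		/* fix the CCH up later */
			return -EAGAIN;
		}
		return 0;				/* caller updates the CCH now */
	}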
@@ -373,8 +408,17 @@ failfmm:
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;
 
+failnoexception:
+       /* TFH status did not show exception pending */
+       gru_flush_cache(tfh);
+       if (cb)
+               gru_flush_cache(cb);
+       STAT(tlb_dropin_fail_no_exception);
+       gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
+               tfh, tfh->status, tfh->state);
+       return 0;
+
 failidle:
-       /* TFH was idle  - no miss pending */
+       /* TFH state was idle - no miss pending */
        gru_flush_cache(tfh);
        if (cb)
                gru_flush_cache(cb);
@@ -410,7 +454,7 @@ failactive:
 irqreturn_t gru_intr(int irq, void *dev_id)
 {
        struct gru_state *gru;
-       struct gru_tlb_fault_map map;
+       struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        int cbrnum, ctxnum;
@@ -423,11 +467,19 @@ irqreturn_t gru_intr(int irq, void *dev_id)
                        raw_smp_processor_id(), irq);
                return IRQ_NONE;
        }
-       get_clear_fault_map(gru, &map);
-       gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
-               map.fault_bits[0]);
+       get_clear_fault_map(gru, &imap, &dmap);
+       gru_dbg(grudev,
+               "irq %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+               irq, gru->gs_gid, imap.fault_bits[0], imap.fault_bits[1],
+               dmap.fault_bits[0], dmap.fault_bits[1]);
+
+       for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
+               complete(gru->gs_blade->bs_async_wq);
+               gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
+                       gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+       }
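
Each set bit in the done map corresponds to one completed asynchronous instruction, and the loop above turns it into a complete() on the blade's wait queue, an ordinary struct completion. The producer/consumer pairing in isolation (hypothetical names, standard completion API):

	#include <linux/completion.h>

	static DECLARE_COMPLETION(async_done);	/* stands in for bs_async_wq */

	/* Interrupt side: one complete() per instruction-done bit. */
	static void signal_done(void)
	{
		complete(&async_done);
	}

	/* Submitting side: sleeps until the interrupt handler signals. */
	static void wait_done(void)
	{
		wait_for_completion(&async_done);
	}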
 
-       for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
+       for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh); /* Helps on hdw, required for emulator */
 
@@ -444,7 +496,8 @@ irqreturn_t gru_intr(int irq, void *dev_id)
                 * This is running in interrupt context. Trylock the mmap_sem.
                 * If it fails, retry the fault in user context.
                 */
-               if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
+               if (!gts->ts_force_cch_reload &&
+                                       down_read_trylock(&gts->ts_mm->mmap_sem)) {
                        gru_try_dropin(gts, tfh, NULL);
                        up_read(&gts->ts_mm->mmap_sem);
                } else {
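
gru_intr() runs in interrupt context and therefore must not sleep on mmap_sem: down_read_trylock() either takes the lock immediately or the fault is deferred to the user-context path in the else branch, and the new ts_force_cch_reload test forces that same deferral when the CCH needs a reload. The trylock idiom in isolation (hypothetical function, rwsem API of this kernel era):

	#include <linux/rwsem.h>

	/* Returns 0 if handled inline, -EAGAIN if deferred to process context. */
	static int try_handle_in_irq(struct rw_semaphore *mmap_sem)
	{
		if (!down_read_trylock(mmap_sem))
			return -EAGAIN;		/* contended: retry from user context */
		/* ... fault handling that needs the mm read-locked ... */
		up_read(mmap_sem);
		return 0;
	}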
@@ -514,6 +567,14 @@ int gru_handle_user_call_os(unsigned long cb)
                gts->ts_force_unload = 1;
        }
 
+       /*
+        * CCH may contain stale data if ts_force_cch_reload is set.
+        */
+       if (gts->ts_gru && gts->ts_force_cch_reload) {
+               gts->ts_force_cch_reload = 0;
+               gru_update_cch(gts, 0);
+       }
+
        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_force_unload) {
@@ -559,14 +620,19 @@ int gru_get_exception_detail(unsigned long arg)
                excdet.ecause = cbe->ecause;
                excdet.exceptdet0 = cbe->idef1upd;
                excdet.exceptdet1 = cbe->idef3upd;
+               excdet.cbrstate = cbe->cbrstate;
+               excdet.cbrexecstatus = cbe->cbrexecstatus;
                ret = 0;
        } else {
                ret = -EAGAIN;
        }
        gru_unlock_gts(gts);
 
-       gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
-               excdet.ecause);
+       gru_dbg(grudev,
+               "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
+               "exdet0 0x%lx, exdet1 0x%x\n",
+               excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
+               excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
        if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
                ret = -EFAULT;
        return ret;
@@ -575,6 +641,31 @@ int gru_get_exception_detail(unsigned long arg)
 /*
  * User request to unload a context. Content is saved for possible reload.
  */
+static int gru_unload_all_contexts(void)
+{
+       struct gru_thread_state *gts;
+       struct gru_state *gru;
+       int gid, ctxnum;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       foreach_gid(gid) {
+               gru = GID_TO_GRU(gid);
+               spin_lock(&gru->gs_lock);
+               for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
+                       gts = gru->gs_gts[ctxnum];
+                       if (gts && mutex_trylock(&gts->ts_ctxlock)) {
+                               spin_unlock(&gru->gs_lock);
+                               gru_unload_context(gts, 1);
+                               mutex_unlock(&gts->ts_ctxlock);
+                               spin_lock(&gru->gs_lock);
+                       }
+               }
+               spin_unlock(&gru->gs_lock);
+       }
+       return 0;
+}
+
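gru_unload_all_contexts() calls gru_unload_context(), which can sleep, so it cannot stay inside the gs_lock spinlock: mutex_trylock() never sleeps and is safe under the spinlock, and the spinlock is dropped around the unload and retaken to continue the walk. The locking shape in isolation (hypothetical types, same pattern):

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct ctx {
		struct mutex lock;
		/* ... */
	};

	static void unload_all(spinlock_t *list_lock, struct ctx **ctxs, int n)
	{
		int i;

		spin_lock(list_lock);
		for (i = 0; i < n; i++) {
			struct ctx *c = ctxs[i];

			if (c && mutex_trylock(&c->lock)) {
				spin_unlock(list_lock);	/* about to sleep */
				/* ... sleeping work: save and unload the context ... */
				mutex_unlock(&c->lock);
				spin_lock(list_lock);	/* reacquire for the walk */
			}
		}
		spin_unlock(list_lock);
	}
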
 int gru_user_unload_context(unsigned long arg)
 {
        struct gru_thread_state *gts;
@@ -586,6 +677,9 @@ int gru_user_unload_context(unsigned long arg)
 
        gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
 
+       if (!req.gseg)
+               return gru_unload_all_contexts();
+
        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;