sparc64: Allow chmc to be built as a module.
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 466f4aa..a104c80 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1,6 +1,6 @@
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -89,6 +89,17 @@ static long iommu_batch_flush(struct iommu_batch *p)
        return 0;
 }
 
+static inline void iommu_batch_new_entry(unsigned long entry)
+{
+       struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+       if (p->entry + p->npages == entry)
+               return;
+       if (p->entry != ~0UL)
+               iommu_batch_flush(p);
+       p->entry = entry;
+}
+
 /* Interrupts must be disabled.  */
 static inline long iommu_batch_add(u64 phys_page)
 {
@@ -113,61 +124,15 @@ static inline long iommu_batch_end(void)
        return iommu_batch_flush(p);
 }
 
-static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
-{
-       unsigned long n, i, start, end, limit;
-       int pass;
-
-       limit = arena->limit;
-       start = arena->hint;
-       pass = 0;
-
-again:
-       n = find_next_zero_bit(arena->map, limit, start);
-       end = n + npages;
-       if (unlikely(end >= limit)) {
-               if (likely(pass < 1)) {
-                       limit = start;
-                       start = 0;
-                       pass++;
-                       goto again;
-               } else {
-                       /* Scanned the whole thing, give up. */
-                       return -1;
-               }
-       }
-
-       for (i = n; i < end; i++) {
-               if (test_bit(i, arena->map)) {
-                       start = i + 1;
-                       goto again;
-               }
-       }
-
-       for (i = n; i < end; i++)
-               __set_bit(i, arena->map);
-
-       arena->hint = end;
-
-       return n;
-}
-
-static void arena_free(struct iommu_arena *arena, unsigned long base,
-                      unsigned long npages)
-{
-       unsigned long i;
-
-       for (i = base; i < (base + npages); i++)
-               __clear_bit(i, arena->map);
-}
-
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
 {
-       struct iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
+       struct iommu *iommu;
+       struct page *page;
        void *ret;
        long entry;
+       int nid;
 
        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
@@ -176,20 +141,22 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
        npages = size >> IO_PAGE_SHIFT;
 
-       first_page = __get_free_pages(gfp, order);
-       if (unlikely(first_page == 0UL))
+       nid = dev->archdata.numa_node;
+       page = alloc_pages_node(nid, gfp, order);
+       if (unlikely(!page))
                return NULL;
 
+       first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
        iommu = dev->archdata.iommu;
 
        spin_lock_irqsave(&iommu->lock, flags);
-       entry = arena_alloc(&iommu->arena, npages);
+       entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
-       if (unlikely(entry < 0L))
-               goto arena_alloc_fail;
+       if (unlikely(entry == DMA_ERROR_CODE))
+               goto range_alloc_fail;
 
        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
@@ -219,10 +186,10 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
-       arena_free(&iommu->arena, entry, npages);
+       iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
-arena_alloc_fail:
+range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
 }
@@ -243,7 +210,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 
        spin_lock_irqsave(&iommu->lock, flags);
 
-       arena_free(&iommu->arena, entry, npages);
+       iommu_range_free(iommu, dvma, npages);
 
        do {
                unsigned long num;
@@ -281,10 +248,10 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
        npages >>= IO_PAGE_SHIFT;
 
        spin_lock_irqsave(&iommu->lock, flags);
-       entry = arena_alloc(&iommu->arena, npages);
+       entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
-       if (unlikely(entry < 0L))
+       if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;
 
        bus_addr = (iommu->page_table_map_base +
@@ -319,7 +286,7 @@ bad:
 iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
-       arena_free(&iommu->arena, entry, npages);
+       iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return DMA_ERROR_CODE;
@@ -350,9 +317,9 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 
        spin_lock_irqsave(&iommu->lock, flags);
 
-       entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-       arena_free(&iommu->arena, entry, npages);
+       iommu_range_free(iommu, bus_addr, npages);
 
+       entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;
 
@@ -365,175 +332,142 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG)        \
-       (__pa(page_address((SG)->page)) + (SG)->offset)
-
-static inline long fill_sg(long entry, struct device *dev,
-                          struct scatterlist *sg,
-                          int nused, int nelems, unsigned long prot)
+static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
+                        int nelems, enum dma_data_direction direction)
 {
-       struct scatterlist *dma_sg = sg;
-       struct scatterlist *sg_end = sg + nelems;
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-
-       iommu_batch_start(dev, prot, entry);
-
-       for (i = 0; i < nused; i++) {
-               unsigned long pteval = ~0UL;
-               u32 dma_npages;
-
-               dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
-                             dma_sg->dma_length +
-                             ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
-               do {
-                       unsigned long offset;
-                       signed int len;
-
-                       /* If we are here, we know we have at least one
-                        * more page to map.  So walk forward until we
-                        * hit a page crossing, and begin creating new
-                        * mappings from that spot.
-                        */
-                       for (;;) {
-                               unsigned long tmp;
-
-                               tmp = SG_ENT_PHYS_ADDRESS(sg);
-                               len = sg->length;
-                               if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
-                                       pteval = tmp & IO_PAGE_MASK;
-                                       offset = tmp & (IO_PAGE_SIZE - 1UL);
-                                       break;
-                               }
-                               if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
-                                       pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
-                                       offset = 0UL;
-                                       len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
-                                       break;
-                               }
-                               sg++;
-                       }
+       struct scatterlist *s, *outs, *segstart;
+       unsigned long flags, handle, prot;
+       dma_addr_t dma_next = 0, dma_addr;
+       unsigned int max_seg_size;
+       unsigned long seg_boundary_size;
+       int outcount, incount, i;
+       struct iommu *iommu;
+       unsigned long base_shift;
+       long err;
 
-                       pteval = (pteval & IOPTE_PAGE);
-                       while (len > 0) {
-                               long err;
+       BUG_ON(direction == DMA_NONE);
 
-                               err = iommu_batch_add(pteval);
-                               if (unlikely(err < 0L))
-                                       goto iommu_map_failed;
+       iommu = dev->archdata.iommu;
+       if (nelems == 0 || !iommu)
+               return 0;
+       
+       prot = HV_PCI_MAP_ATTR_READ;
+       if (direction != DMA_TO_DEVICE)
+               prot |= HV_PCI_MAP_ATTR_WRITE;
 
-                               pteval += IO_PAGE_SIZE;
-                               len -= (IO_PAGE_SIZE - offset);
-                               offset = 0;
-                               dma_npages--;
-                       }
+       outs = s = segstart = &sglist[0];
+       outcount = 1;
+       incount = nelems;
+       handle = 0;
 
-                       pteval = (pteval & IOPTE_PAGE) + len;
-                       sg++;
+       /* Init first segment length for backout at failure */
+       outs->dma_length = 0;
 
-                       /* Skip over any tail mappings we've fully mapped,
-                        * adjusting pteval along the way.  Stop when we
-                        * detect a page crossing event.
-                        */
-                       while (sg < sg_end &&
-                              (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
-                              (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
-                              ((pteval ^
-                                (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
-                               pteval += sg->length;
-                               sg++;
-                       }
-                       if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
-                               pteval = ~0UL;
-               } while (dma_npages != 0);
-               dma_sg++;
-       }
-
-       if (unlikely(iommu_batch_end() < 0L))
-               goto iommu_map_failed;
+       spin_lock_irqsave(&iommu->lock, flags);
 
-       local_irq_restore(flags);
-       return 0;
+       iommu_batch_start(dev, prot, ~0UL);
 
-iommu_map_failed:
-       local_irq_restore(flags);
-       return -1L;
-}
+       max_seg_size = dma_get_max_seg_size(dev);
+       seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+                                 IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+       base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+       for_each_sg(sglist, s, nelems, i) {
+               unsigned long paddr, npages, entry, out_entry = 0, slen;
 
-static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
-                        int nelems, enum dma_data_direction direction)
-{
-       struct iommu *iommu;
-       unsigned long flags, npages, prot;
-       u32 dma_base;
-       struct scatterlist *sgtmp;
-       long entry, err;
-       int used;
-
-       /* Fast path single entry scatterlists. */
-       if (nelems == 1) {
-               sglist->dma_address =
-                       dma_4v_map_single(dev,
-                                         (page_address(sglist->page) +
-                                          sglist->offset),
-                                         sglist->length, direction);
-               if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
-                       return 0;
-               sglist->dma_length = sglist->length;
-               return 1;
-       }
+               slen = s->length;
+               /* Sanity check */
+               if (slen == 0) {
+                       dma_next = 0;
+                       continue;
+               }
+               /* Allocate iommu entries for that segment */
+               paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
+               npages = iommu_num_pages(paddr, slen);
+               entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
-       iommu = dev->archdata.iommu;
-       
-       if (unlikely(direction == DMA_NONE))
-               goto bad;
+               /* Handle failure */
+               if (unlikely(entry == DMA_ERROR_CODE)) {
+                       if (printk_ratelimit())
+                               printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
+                                      " npages %lx\n", iommu, paddr, npages);
+                       goto iommu_map_failed;
+               }
 
-       /* Step 1: Prepare scatter list. */
-       npages = prepare_sg(sglist, nelems);
+               iommu_batch_new_entry(entry);
 
-       /* Step 2: Allocate a cluster and context, if necessary. */
-       spin_lock_irqsave(&iommu->lock, flags);
-       entry = arena_alloc(&iommu->arena, npages);
-       spin_unlock_irqrestore(&iommu->lock, flags);
+               /* Convert entry to a dma_addr_t */
+               dma_addr = iommu->page_table_map_base +
+                       (entry << IO_PAGE_SHIFT);
+               dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
-       if (unlikely(entry < 0L))
-               goto bad;
+               /* Insert into HW table */
+               paddr &= IO_PAGE_MASK;
+               while (npages--) {
+                       err = iommu_batch_add(paddr);
+                       if (unlikely(err < 0L))
+                               goto iommu_map_failed;
+                       paddr += IO_PAGE_SIZE;
+               }
 
-       dma_base = iommu->page_table_map_base +
-               (entry << IO_PAGE_SHIFT);
+               /* If we are in an open segment, try merging */
+               if (segstart != s) {
+                       /* We cannot merge if:
+                        * - allocated dma_addr isn't contiguous to previous allocation
+                        */
+                       if ((dma_addr != dma_next) ||
+                           (outs->dma_length + s->length > max_seg_size) ||
+                           (is_span_boundary(out_entry, base_shift,
+                                             seg_boundary_size, outs, s))) {
+                               /* Can't merge: create a new segment */
+                               segstart = s;
+                               outcount++;
+                               outs = sg_next(outs);
+                       } else {
+                               outs->dma_length += s->length;
+                       }
+               }
 
-       /* Step 3: Normalize DMA addresses. */
-       used = nelems;
+               if (segstart == s) {
+                       /* This is a new segment, fill entries */
+                       outs->dma_address = dma_addr;
+                       outs->dma_length = slen;
+                       out_entry = entry;
+               }
 
-       sgtmp = sglist;
-       while (used && sgtmp->dma_length) {
-               sgtmp->dma_address += dma_base;
-               sgtmp++;
-               used--;
+               /* Calculate next page pointer for contiguous check */
+               dma_next = dma_addr + slen;
        }
-       used = nelems - used;
 
-       /* Step 4: Create the mappings. */
-       prot = HV_PCI_MAP_ATTR_READ;
-       if (direction != DMA_TO_DEVICE)
-               prot |= HV_PCI_MAP_ATTR_WRITE;
+       err = iommu_batch_end();
 
-       err = fill_sg(entry, dev, sglist, used, nelems, prot);
        if (unlikely(err < 0L))
                goto iommu_map_failed;
 
-       return used;
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
-bad:
-       if (printk_ratelimit())
-               WARN_ON(1);
-       return 0;
+       if (outcount < incount) {
+               outs = sg_next(outs);
+               outs->dma_address = DMA_ERROR_CODE;
+               outs->dma_length = 0;
+       }
+
+       return outcount;
 
 iommu_map_failed:
-       spin_lock_irqsave(&iommu->lock, flags);
-       arena_free(&iommu->arena, entry, npages);
+       for_each_sg(sglist, s, nelems, i) {
+               if (s->dma_length != 0) {
+                       unsigned long vaddr, npages;
+
+                       vaddr = s->dma_address & IO_PAGE_MASK;
+                       npages = iommu_num_pages(s->dma_address, s->dma_length);
+                       iommu_range_free(iommu, vaddr, npages);
+                       /* XXX demap? XXX */
+                       s->dma_address = DMA_ERROR_CODE;
+                       s->dma_length = 0;
+               }
+               if (s == outs)
+                       break;
+       }
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
@@ -543,43 +477,42 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
 {
        struct pci_pbm_info *pbm;
+       struct scatterlist *sg;
        struct iommu *iommu;
-       unsigned long flags, i, npages;
-       long entry;
-       u32 devhandle, bus_addr;
+       unsigned long flags;
+       u32 devhandle;
 
-       if (unlikely(direction == DMA_NONE)) {
-               if (printk_ratelimit())
-                       WARN_ON(1);
-       }
+       BUG_ON(direction == DMA_NONE);
 
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        
-       bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-       for (i = 1; i < nelems; i++)
-               if (sglist[i].dma_length == 0)
-                       break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
-                 bus_addr) >> IO_PAGE_SHIFT;
+       spin_lock_irqsave(&iommu->lock, flags);
 
-       entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+       sg = sglist;
+       while (nelems--) {
+               dma_addr_t dma_handle = sg->dma_address;
+               unsigned int len = sg->dma_length;
+               unsigned long npages, entry;
 
-       spin_lock_irqsave(&iommu->lock, flags);
+               if (!len)
+                       break;
+               npages = iommu_num_pages(dma_handle, len);
+               iommu_range_free(iommu, dma_handle, npages);
 
-       arena_free(&iommu->arena, entry, npages);
+               entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+               while (npages) {
+                       unsigned long num;
 
-       do {
-               unsigned long num;
+                       num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+                                                   npages);
+                       entry += num;
+                       npages -= num;
+               }
 
-               num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                           npages);
-               entry += num;
-               npages -= num;
-       } while (npages != 0);
+               sg = sg_next(sg);
+       }
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -598,7 +531,7 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
        /* Nothing to do... */
 }
 
-const struct dma_ops sun4v_dma_ops = {
+static const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_single                     = dma_4v_map_single,
@@ -609,7 +542,7 @@ const struct dma_ops sun4v_dma_ops = {
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
 };
 
-static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
+static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
 {
        struct property *prop;
        struct device_node *dp;
@@ -622,8 +555,8 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
        /* XXX register error interrupt handlers XXX */
 }
 
-static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-                                           struct iommu *iommu)
+static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
+                                                  struct iommu *iommu)
 {
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
@@ -650,7 +583,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
        return cnt;
 }
 
-static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
+static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
        struct iommu *iommu = pbm->iommu;
        struct property *prop;
@@ -748,111 +681,102 @@ struct pci_sun4v_msiq_entry {
        u64             reserved2;
 };
 
-/* For now this just runs as a pre-handler for the real interrupt handler.
- * So we just walk through the queue and ACK all the entries, update the
- * head pointer, and return.
- *
- * In the longer term it would be nice to do something more integrated
- * wherein we can pass in some of this MSI info to the drivers.  This
- * would be most useful for PCIe fabric error messages, although we could
- * invoke those directly from the loop here in order to pass the info around.
- */
-static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
+static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+                             unsigned long *head)
 {
-       struct pci_pbm_info *pbm = data1;
-       struct pci_sun4v_msiq_entry *base, *ep;
-       unsigned long msiqid, orig_head, head, type, err;
+       unsigned long err, limit;
 
-       msiqid = (unsigned long) data2;
-
-       head = 0xdeadbeef;
-       err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
+       err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
-               goto hv_error_get;
-
-       if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
-               goto bad_offset;
-
-       head /= sizeof(struct pci_sun4v_msiq_entry);
-       orig_head = head;
-       base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
-                                  (pbm->msiq_ent_count *
-                                   sizeof(struct pci_sun4v_msiq_entry))));
-       ep = &base[head];
-       while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
-               type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
-               if (unlikely(type != MSIQ_TYPE_MSI32 &&
-                            type != MSIQ_TYPE_MSI64))
-                       goto bad_type;
-
-               pci_sun4v_msi_setstate(pbm->devhandle,
-                                      ep->msi_data /* msi_num */,
-                                      HV_MSISTATE_IDLE);
-
-               /* Clear the entry.  */
-               ep->version_type &= ~MSIQ_TYPE_MASK;
-
-               /* Go to next entry in ring.  */
-               head++;
-               if (head >= pbm->msiq_ent_count)
-                       head = 0;
-               ep = &base[head];
-       }
+               return -ENXIO;
 
-       if (likely(head != orig_head)) {
-               /* ACK entries by updating head pointer.  */
-               head *= sizeof(struct pci_sun4v_msiq_entry);
-               err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
-               if (unlikely(err))
-                       goto hv_error_set;
-       }
-       return;
+       limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+       if (unlikely(*head >= limit))
+               return -EFBIG;
 
-hv_error_set:
-       printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
-       goto hv_error_cont;
+       return 0;
+}
 
-hv_error_get:
-       printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);
+static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
+                                unsigned long msiqid, unsigned long *head,
+                                unsigned long *msi)
+{
+       struct pci_sun4v_msiq_entry *ep;
+       unsigned long err, type;
 
-hv_error_cont:
-       printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
-              pbm->devhandle, msiqid, head);
-       return;
+       /* Note: void pointer arithmetic, 'head' is a byte offset  */
+       ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
+                                (pbm->msiq_ent_count *
+                                 sizeof(struct pci_sun4v_msiq_entry))) +
+             *head);
 
-bad_offset:
-       printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
-              head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
-       return;
+       if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
+               return 0;
 
-bad_type:
-       printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
-       return;
+       type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
+       if (unlikely(type != MSIQ_TYPE_MSI32 &&
+                    type != MSIQ_TYPE_MSI64))
+               return -EINVAL;
+
+       *msi = ep->msi_data;
+
+       err = pci_sun4v_msi_setstate(pbm->devhandle,
+                                    ep->msi_data /* msi_num */,
+                                    HV_MSISTATE_IDLE);
+       if (unlikely(err))
+               return -ENXIO;
+
+       /* Clear the entry.  */
+       ep->version_type &= ~MSIQ_TYPE_MASK;
+
+       (*head) += sizeof(struct pci_sun4v_msiq_entry);
+       if (*head >=
+           (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
+               *head = 0;
+
+       return 1;
 }
 
-static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+                             unsigned long head)
 {
-       unsigned long size, bits_per_ulong;
+       unsigned long err;
 
-       bits_per_ulong = sizeof(unsigned long) * 8;
-       size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
-       size /= 8;
-       BUG_ON(size % sizeof(unsigned long));
+       err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
+       if (unlikely(err))
+               return -EINVAL;
 
-       pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
-       if (!pbm->msi_bitmap)
-               return -ENOMEM;
+       return 0;
+}
 
+static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+                              unsigned long msi, int is_msi64)
+{
+       if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
+                                 (is_msi64 ?
+                                  HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
+               return -ENXIO;
+       if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
+               return -ENXIO;
+       if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
+               return -ENXIO;
        return 0;
 }
 
-static void msi_bitmap_free(struct pci_pbm_info *pbm)
+static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
 {
-       kfree(pbm->msi_bitmap);
-       pbm->msi_bitmap = NULL;
+       unsigned long err, msiqid;
+
+       err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
+       if (err)
+               return -ENXIO;
+
+       pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
+
+       return 0;
 }
 
-static int msi_queue_alloc(struct pci_pbm_info *pbm)
+static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
 {
        unsigned long q_size, alloc_size, pages, order;
        int i;
@@ -906,234 +830,59 @@ h_error:
        return -EINVAL;
 }
 
-
-static int alloc_msi(struct pci_pbm_info *pbm)
+static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
 {
+       unsigned long q_size, alloc_size, pages, order;
        int i;
 
-       for (i = 0; i < pbm->msi_num; i++) {
-               if (!test_and_set_bit(i, pbm->msi_bitmap))
-                       return i + pbm->msi_first;
-       }
-
-       return -ENOENT;
-}
-
-static void free_msi(struct pci_pbm_info *pbm, int msi_num)
-{
-       msi_num -= pbm->msi_first;
-       clear_bit(msi_num, pbm->msi_bitmap);
-}
-
-static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
-                                  struct pci_dev *pdev,
-                                  struct msi_desc *entry)
-{
-       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-       unsigned long devino, msiqid;
-       struct msi_msg msg;
-       int msi_num, err;
-
-       *virt_irq_p = 0;
-
-       msi_num = alloc_msi(pbm);
-       if (msi_num < 0)
-               return msi_num;
-
-       devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
-                                pbm->msiq_first_devino,
-                                (pbm->msiq_first_devino +
-                                 pbm->msiq_num));
-       err = -ENOMEM;
-       if (!devino)
-               goto out_err;
-
-       msiqid = ((devino - pbm->msiq_first_devino) +
-                 pbm->msiq_first);
-
-       err = -EINVAL;
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-       if (err)
-               goto out_err;
-
-       if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
-               goto out_err;
-
-       if (pci_sun4v_msi_setmsiq(pbm->devhandle,
-                                 msi_num, msiqid,
-                                 (entry->msi_attrib.is_64 ?
-                                  HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
-               goto out_err;
-
-       if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
-               goto out_err;
-
-       if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
-               goto out_err;
-
-       pdev->dev.archdata.msi_num = msi_num;
+       for (i = 0; i < pbm->msiq_num; i++) {
+               unsigned long msiqid = pbm->msiq_first + i;
 
-       if (entry->msi_attrib.is_64) {
-               msg.address_hi = pbm->msi64_start >> 32;
-               msg.address_lo = pbm->msi64_start & 0xffffffff;
-       } else {
-               msg.address_hi = 0;
-               msg.address_lo = pbm->msi32_start;
+               (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }
-       msg.data = msi_num;
-
-       set_irq_msi(*virt_irq_p, entry);
-       write_msi_msg(*virt_irq_p, &msg);
 
-       irq_install_pre_handler(*virt_irq_p,
-                               pci_sun4v_msi_prehandler,
-                               pbm, (void *) msiqid);
+       q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+       alloc_size = (pbm->msiq_num * q_size);
+       order = get_order(alloc_size);
 
-       return 0;
+       pages = (unsigned long) pbm->msi_queues;
 
-out_err:
-       free_msi(pbm, msi_num);
-       sun4v_destroy_msi(*virt_irq_p);
-       *virt_irq_p = 0;
-       return err;
+       free_pages(pages, order);
 
+       pbm->msi_queues = NULL;
 }
 
-static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
-                                      struct pci_dev *pdev)
+static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
+                                   unsigned long msiqid,
+                                   unsigned long devino)
 {
-       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-       unsigned long msiqid, err;
-       unsigned int msi_num;
-
-       msi_num = pdev->dev.archdata.msi_num;
-       err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
-       if (err) {
-               printk(KERN_ERR "%s: getmsiq gives error %lu\n",
-                      pbm->name, err);
-               return;
-       }
+       unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
 
-       pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
-       pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);
+       if (!virt_irq)
+               return -ENOMEM;
 
-       free_msi(pbm, msi_num);
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
+       if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
+               return -EINVAL;
 
-       /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
-        * allocation.
-        */
-       sun4v_destroy_msi(virt_irq);
+       return virt_irq;
 }
 
+static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
+       .get_head       =       pci_sun4v_get_head,
+       .dequeue_msi    =       pci_sun4v_dequeue_msi,
+       .set_head       =       pci_sun4v_set_head,
+       .msi_setup      =       pci_sun4v_msi_setup,
+       .msi_teardown   =       pci_sun4v_msi_teardown,
+       .msiq_alloc     =       pci_sun4v_msiq_alloc,
+       .msiq_free      =       pci_sun4v_msiq_free,
+       .msiq_build_irq =       pci_sun4v_msiq_build_irq,
+};
+
 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 {
-       const u32 *val;
-       int len;
-
-       val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
-       if (!val || len != 4)
-               goto no_msi;
-       pbm->msiq_num = *val;
-       if (pbm->msiq_num) {
-               const struct msiq_prop {
-                       u32 first_msiq;
-                       u32 num_msiq;
-                       u32 first_devino;
-               } *mqp;
-               const struct msi_range_prop {
-                       u32 first_msi;
-                       u32 num_msi;
-               } *mrng;
-               const struct addr_range_prop {
-                       u32 msi32_high;
-                       u32 msi32_low;
-                       u32 msi32_len;
-                       u32 msi64_high;
-                       u32 msi64_low;
-                       u32 msi64_len;
-               } *arng;
-
-               val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
-               if (!val || len != 4)
-                       goto no_msi;
-
-               pbm->msiq_ent_count = *val;
-
-               mqp = of_get_property(pbm->prom_node,
-                                     "msi-eq-to-devino", &len);
-               if (!mqp || len != sizeof(struct msiq_prop))
-                       goto no_msi;
-
-               pbm->msiq_first = mqp->first_msiq;
-               pbm->msiq_first_devino = mqp->first_devino;
-
-               val = of_get_property(pbm->prom_node, "#msi", &len);
-               if (!val || len != 4)
-                       goto no_msi;
-               pbm->msi_num = *val;
-
-               mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
-               if (!mrng || len != sizeof(struct msi_range_prop))
-                       goto no_msi;
-               pbm->msi_first = mrng->first_msi;
-
-               val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
-               if (!val || len != 4)
-                       goto no_msi;
-               pbm->msi_data_mask = *val;
-
-               val = of_get_property(pbm->prom_node, "msix-data-width", &len);
-               if (!val || len != 4)
-                       goto no_msi;
-               pbm->msix_data_width = *val;
-
-               arng = of_get_property(pbm->prom_node, "msi-address-ranges",
-                                      &len);
-               if (!arng || len != sizeof(struct addr_range_prop))
-                       goto no_msi;
-               pbm->msi32_start = ((u64)arng->msi32_high << 32) |
-                       (u64) arng->msi32_low;
-               pbm->msi64_start = ((u64)arng->msi64_high << 32) |
-                       (u64) arng->msi64_low;
-               pbm->msi32_len = arng->msi32_len;
-               pbm->msi64_len = arng->msi64_len;
-
-               if (msi_bitmap_alloc(pbm))
-                       goto no_msi;
-
-               if (msi_queue_alloc(pbm)) {
-                       msi_bitmap_free(pbm);
-                       goto no_msi;
-               }
-
-               printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
-                      "devino[0x%x]\n",
-                      pbm->name,
-                      pbm->msiq_first, pbm->msiq_num,
-                      pbm->msiq_ent_count,
-                      pbm->msiq_first_devino);
-               printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
-                      "width[%u]\n",
-                      pbm->name,
-                      pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
-                      pbm->msix_data_width);
-               printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
-                      "addr64[0x%lx:0x%x]\n",
-                      pbm->name,
-                      pbm->msi32_start, pbm->msi32_len,
-                      pbm->msi64_start, pbm->msi64_len);
-               printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
-                      pbm->name,
-                      pbm->msi_queues);
-       }
-       pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
-       pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
-
-       return;
-
-no_msi:
-       pbm->msiq_num = 0;
-       printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+       sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
 }
 #else /* CONFIG_PCI_MSI */
 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
@@ -1141,7 +890,8 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 }
 #endif /* !(CONFIG_PCI_MSI) */
 
-static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
+static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
+                                     struct device_node *dp, u32 devhandle)
 {
        struct pci_pbm_info *pbm;
 
@@ -1153,6 +903,8 @@ static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct devi
        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;
 
+       pbm->numa_node = of_node_to_nid(dp);
+
        pbm->scan_bus = pci_sun4v_scan_bus;
        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;
@@ -1167,6 +919,7 @@ static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct devi
        pbm->name = dp->full_name;
 
        printk("%s: SUN4V PCI Bus Module\n", pbm->name);
+       printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
 
        pci_determine_mem_io_space(pbm);
 
@@ -1203,6 +956,10 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
        }
 
        prop = of_find_property(dp, "reg", NULL);
+       if (!prop) {
+               prom_printf("SUN4V_PCI: Could not find config registers\n");
+               prom_halt();
+       }
        regs = prop->value;
 
        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
@@ -1239,11 +996,6 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
 
        p->pbm_B.iommu = iommu;
 
-       /* Like PSYCHO and SCHIZO we have a 2GB aligned area
-        * for memory space.
-        */
-       pci_memspace_mask = 0x7fffffffUL;
-
        pci_sun4v_pbm_init(p, dp, devhandle);
        return;