/* pci_sun4v.c: SUN4V specific PCI controller support.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/log2.h>
+#include <linux/of_device.h>
-#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
-#include <asm/upa.h>
-#include <asm/pstate.h>
-#include <asm/oplib.h>
#include <asm/hypervisor.h>
+#include <asm/prom.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "pci_sun4v.h"
-#define PGLIST_NENTS 2048
+#define DRIVER_NAME "pci_sun4v"
+#define PFX DRIVER_NAME ": "
-struct sun4v_pglist {
- u64 pglist[PGLIST_NENTS];
+static unsigned long vpci_major = 1;
+static unsigned long vpci_minor = 1;
+
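+/* Each CPU gets one page of scratch space for batching physical
+ * addresses; with sparc64's default 8K pages that is 1024 entries
+ * per flush.
+ */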
+#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
+
+struct iommu_batch {
+ struct device *dev; /* Device mapping is for. */
+ unsigned long prot; /* IOMMU page protections */
+ unsigned long entry; /* Index into IOTSB. */
+ u64 *pglist; /* List of physical pages */
+ unsigned long npages; /* Number of pages in list. */
};
-static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
+static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
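+
+/* Mappings are queued into the per-CPU batch above and pushed to the
+ * hypervisor in bulk by iommu_batch_flush().  Interrupts stay disabled
+ * from iommu_batch_start() through iommu_batch_end() so the per-CPU
+ * state cannot be corrupted by a nested mapping request.
+ */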
-static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
+/* Interrupts must be disabled. */
+static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
- unsigned long n, i, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = find_next_zero_bit(arena->map, limit, start);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+ p->dev = dev;
+ p->prot = prot;
+ p->entry = entry;
+ p->npages = 0;
+}
+
+/* Interrupts must be disabled. */
+static long iommu_batch_flush(struct iommu_batch *p)
+{
+ struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+ unsigned long devhandle = pbm->devhandle;
+ unsigned long prot = p->prot;
+ unsigned long entry = p->entry;
+ u64 *pglist = p->pglist;
+ unsigned long npages = p->npages;
+
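+ /* The hypervisor may map fewer pages than requested in a
+ * single call, so keep retrying with the remainder.
+ */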
+ while (npages != 0) {
+ long num;
+
+ num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
+ npages, prot, __pa(pglist));
+ if (unlikely(num < 0)) {
+ if (printk_ratelimit())
+ printk("iommu_batch_flush: IOMMU map of "
+ "[%08lx:%08lx:%lx:%lx:%lx] failed with "
+ "status %ld\n",
+ devhandle, HV_PCI_TSBID(0, entry),
+ npages, prot, __pa(pglist), num);
return -1;
}
- }
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
+ entry += num;
+ npages -= num;
+ pglist += num;
}
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
+ p->entry = entry;
+ p->npages = 0;
+
+ return 0;
+}
+
+static inline void iommu_batch_new_entry(unsigned long entry)
+{
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+ if (p->entry + p->npages == entry)
+ return;
+ if (p->entry != ~0UL)
+ iommu_batch_flush(p);
+ p->entry = entry;
+}
+
+/* Interrupts must be disabled. */
+static inline long iommu_batch_add(u64 phys_page)
+{
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+ BUG_ON(p->npages >= PGLIST_NENTS);
- arena->hint = end;
+ p->pglist[p->npages++] = phys_page;
+ if (p->npages == PGLIST_NENTS)
+ return iommu_batch_flush(p);
- return n;
+ return 0;
}
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+/* Interrupts must be disabled. */
+static inline long iommu_batch_end(void)
{
- unsigned long i;
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+ BUG_ON(p->npages >= PGLIST_NENTS);
- for (i = base; i < (base + npages); i++)
- __clear_bit(i, arena->map);
+ return iommu_batch_flush(p);
}
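+
+/* Typical batched-mapping sequence (sketch only; 'pa', 'npages' and
+ * 'fail' are hypothetical caller-side names, mirroring
+ * dma_4v_map_single() below):
+ *
+ *	local_irq_save(flags);
+ *	iommu_batch_start(dev, prot, entry);
+ *	for (i = 0; i < npages; i++, pa += IO_PAGE_SIZE)
+ *		if (iommu_batch_add(pa) < 0L)
+ *			goto fail;
+ *	if (iommu_batch_end() < 0L)
+ *		goto fail;
+ *	local_irq_restore(flags);
+ */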
-static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addrp, gfp_t gfp)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- unsigned long devhandle, flags, order, first_page, npages, n;
+ unsigned long flags, order, first_page, npages, n;
+ struct iommu *iommu;
+ struct page *page;
void *ret;
long entry;
- u64 *pglist;
- int cpu;
+ int nid;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
- if (order >= MAX_ORDER)
+ if (unlikely(order >= MAX_ORDER))
return NULL;
npages = size >> IO_PAGE_SHIFT;
- if (npages > PGLIST_NENTS)
- return NULL;
- first_page = __get_free_pages(GFP_ATOMIC, order);
- if (first_page == 0UL)
+ nid = dev->archdata.numa_node;
+ page = alloc_pages_node(nid, gfp, order);
+ if (unlikely(!page))
return NULL;
+
+ first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
- pcp = pdev->sysdata;
- devhandle = pcp->pbm->devhandle;
- iommu = pcp->pbm->iommu;
+ iommu = dev->archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
- entry = pci_arena_alloc(&iommu->arena, npages);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
spin_unlock_irqrestore(&iommu->lock, flags);
- if (unlikely(entry < 0L)) {
- free_pages(first_page, order);
- return NULL;
- }
+ if (unlikely(entry == DMA_ERROR_CODE))
+ goto range_alloc_fail;
*dma_addrp = (iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
- cpu = get_cpu();
+ local_irq_save(flags);
- pglist = &__get_cpu_var(iommu_pglists).pglist[0];
- for (n = 0; n < npages; n++)
- pglist[n] = first_page + (n * PAGE_SIZE);
+ iommu_batch_start(dev,
+ (HV_PCI_MAP_ATTR_READ |
+ HV_PCI_MAP_ATTR_WRITE),
+ entry);
- do {
- unsigned long num;
+ for (n = 0; n < npages; n++) {
+ long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+ if (unlikely(err < 0L))
+ goto iommu_map_fail;
+ }
- num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
- npages,
- (HV_PCI_MAP_ATTR_READ |
- HV_PCI_MAP_ATTR_WRITE),
- __pa(pglist));
- entry += num;
- npages -= num;
- pglist += num;
- } while (npages != 0);
+ if (unlikely(iommu_batch_end() < 0L))
+ goto iommu_map_fail;
- put_cpu();
+ local_irq_restore(flags);
return ret;
+
+iommu_map_fail:
+ /* Interrupts are disabled. */
+ spin_lock(&iommu->lock);
+ iommu_range_free(iommu, *dma_addrp, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+range_alloc_fail:
+ free_pages(first_page, order);
+ return NULL;
}
-static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
+ dma_addr_t dvma)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- unsigned long flags, order, npages, entry, devhandle;
+ struct pci_pbm_info *pbm;
+ struct iommu *iommu;
+ unsigned long flags, order, npages, entry;
+ u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = dev->archdata.iommu;
+ pbm = dev->archdata.host_controller;
+ devhandle = pbm->devhandle;
entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
- pci_arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, dvma, npages);
do {
unsigned long num;
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
+ enum dma_data_direction direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr, devhandle;
+ unsigned long i, base_paddr;
u32 bus_addr, ret;
unsigned long prot;
long entry;
- u64 *pglist;
- int cpu;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = dev->archdata.iommu;
- if (unlikely(direction == PCI_DMA_NONE))
+ if (unlikely(direction == DMA_NONE))
goto bad;
oaddr = (unsigned long)ptr;
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- if (unlikely(npages > PGLIST_NENTS))
- goto bad;
spin_lock_irqsave(&iommu->lock, flags);
- entry = pci_arena_alloc(&iommu->arena, npages);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
spin_unlock_irqrestore(&iommu->lock, flags);
- if (unlikely(entry < 0L))
+ if (unlikely(entry == DMA_ERROR_CODE))
goto bad;
bus_addr = (iommu->page_table_map_base +
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
- if (direction != PCI_DMA_TODEVICE)
+ if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
- cpu = get_cpu();
-
- pglist = &__get_cpu_var(iommu_pglists).pglist[0];
- for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
- pglist[i] = base_paddr;
+ local_irq_save(flags);
- do {
- unsigned long num;
+ iommu_batch_start(dev, prot, entry);
- num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
- npages, prot,
- __pa(pglist));
- entry += num;
- npages -= num;
- pglist += num;
- } while (npages != 0);
+ for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
+ long err = iommu_batch_add(base_paddr);
+ if (unlikely(err < 0L))
+ goto iommu_map_fail;
+ }
+ if (unlikely(iommu_batch_end() < 0L))
+ goto iommu_map_fail;
- put_cpu();
+ local_irq_restore(flags);
return ret;
bad:
if (printk_ratelimit())
WARN_ON(1);
- return PCI_DMA_ERROR_CODE;
+ return DMA_ERROR_CODE;
+
+iommu_map_fail:
+ /* Interrupts are disabled. */
+ spin_lock(&iommu->lock);
+ iommu_range_free(iommu, bus_addr, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return DMA_ERROR_CODE;
}
-static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- unsigned long flags, npages, devhandle;
+ struct pci_pbm_info *pbm;
+ struct iommu *iommu;
+ unsigned long flags, npages;
long entry;
+ u32 devhandle;
- if (unlikely(direction == PCI_DMA_NONE)) {
+ if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = dev->archdata.iommu;
+ pbm = dev->archdata.host_controller;
+ devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
- entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- pci_arena_free(&iommu->arena, entry, npages);
+ iommu_range_free(iommu, bus_addr, npages);
+ entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
do {
unsigned long num;
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
-
-static inline void fill_sg(long entry, unsigned long devhandle,
- struct scatterlist *sg,
- int nused, int nelems, unsigned long prot)
+static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
{
- struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
- int i, cpu, pglist_ent;
- u64 *pglist;
-
- cpu = get_cpu();
- pglist = &__get_cpu_var(iommu_pglists).pglist[0];
- pglist_ent = 0;
- for (i = 0; i < nused; i++) {
- unsigned long pteval = ~0UL;
- u32 dma_npages;
-
- dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
- dma_sg->dma_length +
- ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
- do {
- unsigned long offset;
- signed int len;
-
- /* If we are here, we know we have at least one
- * more page to map. So walk forward until we
- * hit a page crossing, and begin creating new
- * mappings from that spot.
- */
- for (;;) {
- unsigned long tmp;
-
- tmp = SG_ENT_PHYS_ADDRESS(sg);
- len = sg->length;
- if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = tmp & IO_PAGE_MASK;
- offset = tmp & (IO_PAGE_SIZE - 1UL);
- break;
- }
- if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
- offset = 0UL;
- len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
- break;
- }
- sg++;
- }
+ struct scatterlist *s, *outs, *segstart;
+ unsigned long flags, handle, prot;
+ dma_addr_t dma_next = 0, dma_addr;
+ unsigned int max_seg_size;
+ unsigned long seg_boundary_size;
+ int outcount, incount, i;
+ struct iommu *iommu;
+ unsigned long base_shift;
+ long err;
+
+ BUG_ON(direction == DMA_NONE);
+
+ iommu = dev->archdata.iommu;
+ if (nelems == 0 || !iommu)
+ return 0;
+
+ prot = HV_PCI_MAP_ATTR_READ;
+ if (direction != DMA_TO_DEVICE)
+ prot |= HV_PCI_MAP_ATTR_WRITE;
- pteval = (pteval & IOPTE_PAGE);
- while (len > 0) {
- pglist[pglist_ent++] = pteval;
- pteval += IO_PAGE_SIZE;
- len -= (IO_PAGE_SIZE - offset);
- offset = 0;
- dma_npages--;
- }
+ outs = s = segstart = &sglist[0];
+ outcount = 1;
+ incount = nelems;
+ handle = 0;
- pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
+ /* Init first segment length for backout at failure */
+ outs->dma_length = 0;
- /* Skip over any tail mappings we've fully mapped,
- * adjusting pteval along the way. Stop when we
- * detect a page crossing event.
- */
- while (sg < sg_end &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
- (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
- ((pteval ^
- (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
- pteval += sg->length;
- sg++;
- }
- if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
- pteval = ~0UL;
- } while (dma_npages != 0);
- dma_sg++;
- }
+ spin_lock_irqsave(&iommu->lock, flags);
- BUG_ON(pglist_ent == 0);
+ iommu_batch_start(dev, prot, ~0UL);
- do {
- unsigned long num;
+ max_seg_size = dma_get_max_seg_size(dev);
+ seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+ base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ for_each_sg(sglist, s, nelems, i) {
+ unsigned long paddr, npages, entry, out_entry = 0, slen;
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- pglist_ent);
- entry += num;
- pglist_ent -= num;
- } while (pglist_ent != 0);
+ slen = s->length;
+ /* Sanity check */
+ if (slen == 0) {
+ dma_next = 0;
+ continue;
+ }
+ /* Allocate iommu entries for that segment */
+ paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
+ npages = iommu_num_pages(paddr, slen);
+ entry = iommu_range_alloc(dev, iommu, npages, &handle);
+
+ /* Handle failure */
+ if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (printk_ratelimit())
+ printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
+ " npages %lx\n", iommu, paddr, npages);
+ goto iommu_map_failed;
+ }
- put_cpu();
-}
+ iommu_batch_new_entry(entry);
-static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- unsigned long flags, npages, prot, devhandle;
- u32 dma_base;
- struct scatterlist *sgtmp;
- long entry;
- int used;
-
- /* Fast path single entry scatterlists. */
- if (nelems == 1) {
- sglist->dma_address =
- pci_4v_map_single(pdev,
- (page_address(sglist->page) + sglist->offset),
- sglist->length, direction);
- if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
- return 0;
- sglist->dma_length = sglist->length;
- return 1;
- }
+ /* Convert entry to a dma_addr_t */
+ dma_addr = iommu->page_table_map_base +
+ (entry << IO_PAGE_SHIFT);
+ dma_addr |= (s->offset & ~IO_PAGE_MASK);
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
-
- if (unlikely(direction == PCI_DMA_NONE))
- goto bad;
+ /* Insert into HW table */
+ paddr &= IO_PAGE_MASK;
+ while (npages--) {
+ err = iommu_batch_add(paddr);
+ if (unlikely(err < 0L))
+ goto iommu_map_failed;
+ paddr += IO_PAGE_SIZE;
+ }
- /* Step 1: Prepare scatter list. */
- npages = prepare_sg(sglist, nelems);
- if (unlikely(npages > PGLIST_NENTS))
- goto bad;
+ /* If we are in an open segment, try merging */
+ if (segstart != s) {
+ /* We cannot merge if:
+ * - allocated dma_addr isn't contiguous to the previous allocation
+ * - combined segment would exceed the device's max segment size
+ * - combined segment would cross the device's segment boundary
+ */
+ if ((dma_addr != dma_next) ||
+ (outs->dma_length + s->length > max_seg_size) ||
+ (is_span_boundary(out_entry, base_shift,
+ seg_boundary_size, outs, s))) {
+ /* Can't merge: create a new segment */
+ segstart = s;
+ outcount++;
+ outs = sg_next(outs);
+ } else {
+ outs->dma_length += s->length;
+ }
+ }
- /* Step 2: Allocate a cluster and context, if necessary. */
- spin_lock_irqsave(&iommu->lock, flags);
- entry = pci_arena_alloc(&iommu->arena, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ if (segstart == s) {
+ /* This is a new segment, fill entries */
+ outs->dma_address = dma_addr;
+ outs->dma_length = slen;
+ out_entry = entry;
+ }
- if (unlikely(entry < 0L))
- goto bad;
+ /* Calculate next page pointer for contiguous check */
+ dma_next = dma_addr + slen;
+ }
- dma_base = iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT);
+ err = iommu_batch_end();
- /* Step 3: Normalize DMA addresses. */
- used = nelems;
+ if (unlikely(err < 0L))
+ goto iommu_map_failed;
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
- sgtmp = sglist;
- while (used && sgtmp->dma_length) {
- sgtmp->dma_address += dma_base;
- sgtmp++;
- used--;
+ if (outcount < incount) {
+ outs = sg_next(outs);
+ outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_length = 0;
}
- used = nelems - used;
- /* Step 4: Create the mappings. */
- prot = HV_PCI_MAP_ATTR_READ;
- if (direction != PCI_DMA_TODEVICE)
- prot |= HV_PCI_MAP_ATTR_WRITE;
+ return outcount;
- fill_sg(entry, devhandle, sglist, used, nelems, prot);
+iommu_map_failed:
+ for_each_sg(sglist, s, nelems, i) {
+ if (s->dma_length != 0) {
+ unsigned long vaddr, npages;
- return used;
+ vaddr = s->dma_address & IO_PAGE_MASK;
+ npages = iommu_num_pages(s->dma_address, s->dma_length);
+ iommu_range_free(iommu, vaddr, npages);
+ /* XXX demap? XXX */
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+ }
+ if (s == outs)
+ break;
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
-bad:
- if (printk_ratelimit())
- WARN_ON(1);
return 0;
}
-static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- unsigned long flags, i, npages, devhandle;
- long entry;
- u32 bus_addr;
+ struct pci_pbm_info *pbm;
+ struct scatterlist *sg;
+ struct iommu *iommu;
+ unsigned long flags;
+ u32 devhandle;
- if (unlikely(direction == PCI_DMA_NONE)) {
- if (printk_ratelimit())
- WARN_ON(1);
- }
+ BUG_ON(direction == DMA_NONE);
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = dev->archdata.iommu;
+ pbm = dev->archdata.host_controller;
+ devhandle = pbm->devhandle;
- bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
- break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
- bus_addr) >> IO_PAGE_SHIFT;
+ spin_lock_irqsave(&iommu->lock, flags);
- entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ sg = sglist;
+ while (nelems--) {
+ dma_addr_t dma_handle = sg->dma_address;
+ unsigned int len = sg->dma_length;
+ unsigned long npages, entry;
- spin_lock_irqsave(&iommu->lock, flags);
+ if (!len)
+ break;
+ npages = iommu_num_pages(dma_handle, len);
+ iommu_range_free(iommu, dma_handle, npages);
- pci_arena_free(&iommu->arena, entry, npages);
+ entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ while (npages) {
+ unsigned long num;
- do {
- unsigned long num;
+ num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+ npages);
+ entry += num;
+ npages -= num;
+ }
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
+ sg = sg_next(sg);
+ }
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4v_sync_single_for_cpu(struct device *dev,
+ dma_addr_t bus_addr, size_t sz,
+ enum dma_data_direction direction)
{
/* Nothing to do... */
}
-static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4v_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
/* Nothing to do... */
}
-struct pci_iommu_ops pci_sun4v_iommu_ops = {
- .alloc_consistent = pci_4v_alloc_consistent,
- .free_consistent = pci_4v_free_consistent,
- .map_single = pci_4v_map_single,
- .unmap_single = pci_4v_unmap_single,
- .map_sg = pci_4v_map_sg,
- .unmap_sg = pci_4v_unmap_sg,
- .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
- .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
+static const struct dma_ops sun4v_dma_ops = {
+ .alloc_coherent = dma_4v_alloc_coherent,
+ .free_coherent = dma_4v_free_coherent,
+ .map_single = dma_4v_map_single,
+ .unmap_single = dma_4v_unmap_single,
+ .map_sg = dma_4v_map_sg,
+ .unmap_sg = dma_4v_unmap_sg,
+ .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
+ .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
};
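+
+/* Once pci_sun4v_probe() points dma_ops at sun4v_dma_ops, an ordinary
+ * driver call such as
+ *
+ *	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
+ *
+ * lands in dma_4v_alloc_coherent() above ('pdev', 'size', 'buf' and
+ * 'dma_handle' are hypothetical driver-side names).
+ */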
-/* SUN4V PCI configuration space accessors. */
-
-static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
- int where, int size, u32 *value)
+static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
- struct pci_pbm_info *pbm = bus_dev->sysdata;
- unsigned long devhandle = pbm->devhandle;
- unsigned int bus = bus_dev->number;
- unsigned int device = PCI_SLOT(devfn);
- unsigned int func = PCI_FUNC(devfn);
- unsigned long ret;
-
- ret = pci_sun4v_config_get(devhandle,
- HV_PCI_DEVICE_BUILD(bus, device, func),
- where, size);
- switch (size) {
- case 1:
- *value = ret & 0xff;
- break;
- case 2:
- *value = ret & 0xffff;
- break;
- case 4:
- *value = ret & 0xffffffff;
- break;
- };
+ struct property *prop;
+ struct device_node *dp;
+ dp = pbm->prom_node;
+ prop = of_find_property(dp, "66mhz-capable", NULL);
+ pbm->is_66mhz_capable = (prop != NULL);
+ pbm->pci_bus = pci_scan_one_pbm(pbm);
- return PCIBIOS_SUCCESSFUL;
+ /* XXX register error interrupt handlers XXX */
}
-static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
- int where, int size, u32 value)
+static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
+ struct iommu *iommu)
{
- struct pci_pbm_info *pbm = bus_dev->sysdata;
- unsigned long devhandle = pbm->devhandle;
- unsigned int bus = bus_dev->number;
- unsigned int device = PCI_SLOT(devfn);
- unsigned int func = PCI_FUNC(devfn);
- unsigned long ret;
+ struct iommu_arena *arena = &iommu->arena;
+ unsigned long i, cnt = 0;
+ u32 devhandle;
- ret = pci_sun4v_config_put(devhandle,
- HV_PCI_DEVICE_BUILD(bus, device, func),
- where, size, value);
-
- return PCIBIOS_SUCCESSFUL;
-}
+ devhandle = pbm->devhandle;
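+
+ /* Presumably this split is about ownership: translations that
+ * target pages in the kernel's physical memory pool are stale
+ * and get demapped, while ones pointing elsewhere (e.g. pages
+ * OBP may still be using) are preserved and marked busy in the
+ * arena map.
+ */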
+ for (i = 0; i < arena->limit; i++) {
+ unsigned long ret, io_attrs, ra;
-static struct pci_ops pci_sun4v_ops = {
- .read = pci_sun4v_read_pci_cfg,
- .write = pci_sun4v_write_pci_cfg,
-};
+ ret = pci_sun4v_iommu_getmap(devhandle,
+ HV_PCI_TSBID(0, i),
+ &io_attrs, &ra);
+ if (ret == HV_EOK) {
+ if (page_in_phys_avail(ra)) {
+ pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, i), 1);
+ } else {
+ cnt++;
+ __set_bit(i, arena->map);
+ }
+ }
+ }
+ return cnt;
+}
-static void pbm_scan_bus(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
+static int __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
- struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+ struct iommu *iommu = pbm->iommu;
+ struct property *prop;
+ unsigned long num_tsb_entries, sz, tsbsize;
+ u32 vdma[2], dma_mask, dma_offset;
+
+ prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
+ if (prop) {
+ u32 *val = prop->value;
- if (!cookie) {
- prom_printf("%s: Critical allocation failure.\n", pbm->name);
- prom_halt();
+ vdma[0] = val[0];
+ vdma[1] = val[1];
+ } else {
+ /* No property, use default values. */
+ vdma[0] = 0x80000000;
+ vdma[1] = 0x80000000;
}
- /* All we care about is the PBM. */
- memset(cookie, 0, sizeof(*cookie));
- cookie->pbm = pbm;
-
- pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
- p->pci_ops,
- pbm);
- pci_fixup_host_bridge_self(pbm->pci_bus);
- pbm->pci_bus->self->sysdata = cookie;
-
- pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
- pci_record_assignments(pbm, pbm->pci_bus);
- pci_assign_unassigned(pbm, pbm->pci_bus);
- pci_fixup_irq(pbm, pbm->pci_bus);
- pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
- pci_setup_busmastering(pbm, pbm->pci_bus);
-}
+ if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
+ printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
+ vdma[0], vdma[1]);
+ return -EINVAL;
+ };
-static void pci_sun4v_scan_bus(struct pci_controller_info *p)
-{
- if (p->pbm_A.prom_node) {
- p->pbm_A.is_66mhz_capable =
- prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
+ dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
+ num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
+ tsbsize = num_tsb_entries * sizeof(iopte_t);
- pbm_scan_bus(p, &p->pbm_A);
- }
- if (p->pbm_B.prom_node) {
- p->pbm_B.is_66mhz_capable =
- prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
+ dma_offset = vdma[0];
+
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
+ iommu->ctx_lowest_free = 1;
+ iommu->page_table_map_base = dma_offset;
+ iommu->dma_addr_mask = dma_mask;
- pbm_scan_bus(p, &p->pbm_B);
+ /* Allocate and initialize the free area map. */
+ sz = (num_tsb_entries + 7) / 8;
+ sz = (sz + 7UL) & ~7UL;
+ iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->arena.map) {
+ printk(KERN_ERR PFX "Error, kzalloc(arena.map) failed.\n");
+ return -ENOMEM;
}
+ iommu->arena.limit = num_tsb_entries;
- /* XXX register error interrupt handlers XXX */
-}
+ sz = probe_existing_entries(pbm, iommu);
+ if (sz)
+ printk("%s: Imported %lu TSB entries from OBP\n",
+ pbm->name, sz);
-static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
- struct pci_dev *pdev,
- unsigned int ino)
-{
- /* XXX Implement me! XXX */
return 0;
}
-static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
+#ifdef CONFIG_PCI_MSI
+struct pci_sun4v_msiq_entry {
+ u64 version_type;
+#define MSIQ_VERSION_MASK 0xffffffff00000000UL
+#define MSIQ_VERSION_SHIFT 32
+#define MSIQ_TYPE_MASK 0x00000000000000ffUL
+#define MSIQ_TYPE_SHIFT 0
+#define MSIQ_TYPE_NONE 0x00
+#define MSIQ_TYPE_MSG 0x01
+#define MSIQ_TYPE_MSI32 0x02
+#define MSIQ_TYPE_MSI64 0x03
+#define MSIQ_TYPE_INTX 0x08
+#define MSIQ_TYPE_NONE2 0xff
+
+ u64 intx_sysino;
+ u64 reserved1;
+ u64 stick;
+ u64 req_id; /* bus/device/func */
+#define MSIQ_REQID_BUS_MASK 0xff00UL
+#define MSIQ_REQID_BUS_SHIFT 8
+#define MSIQ_REQID_DEVICE_MASK 0x00f8UL
+#define MSIQ_REQID_DEVICE_SHIFT 3
+#define MSIQ_REQID_FUNC_MASK 0x0007UL
+#define MSIQ_REQID_FUNC_SHIFT 0
+
+ u64 msi_address;
+
+ /* The format of this value is message type dependent.
+ * For MSI bits 15:0 are the data from the MSI packet.
+ * For MSI-X bits 31:0 are the data from the MSI packet.
+ * For MSG, the value encodes the message code and message
+ * routing code, where:
+ * bits 39:32 are the bus/device/fn of the msg target-id
+ * bits 18:16 are the message routing code
+ * bits 7:0 are the message code
+ * For INTx the low-order 2 bits are:
+ * 00 - INTA
+ * 01 - INTB
+ * 10 - INTC
+ * 11 - INTD
+ */
+ u64 msi_data;
+
+ u64 reserved2;
+};
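+
+/* Worked example of the req_id layout: req_id 0x0a13 decodes to
+ * bus 0x0a ((0x0a13 & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT),
+ * device 2 ((0x0a13 & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT)
+ * and function 3 (0x0a13 & MSIQ_REQID_FUNC_MASK).
+ */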
+
+static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long *head)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
- struct resource *res, *root;
- u32 reg;
- int where, size, is_64bit;
-
- res = &pdev->resource[resource];
- if (resource < 6) {
- where = PCI_BASE_ADDRESS_0 + (resource * 4);
- } else if (resource == PCI_ROM_RESOURCE) {
- where = pdev->rom_base_reg;
- } else {
- /* Somebody might have asked allocation of a non-standard resource */
- return;
- }
+ unsigned long err, limit;
- /* XXX 64-bit MEM handling is not %100 correct... XXX */
- is_64bit = 0;
- if (res->flags & IORESOURCE_IO)
- root = &pbm->io_space;
- else {
- root = &pbm->mem_space;
- if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
- == PCI_BASE_ADDRESS_MEM_TYPE_64)
- is_64bit = 1;
- }
+ err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
+ if (unlikely(err))
+ return -ENXIO;
- size = res->end - res->start;
- pci_read_config_dword(pdev, where, &reg);
- reg = ((reg & size) |
- (((u32)(res->start - root->start)) & ~size));
- if (resource == PCI_ROM_RESOURCE) {
- reg |= PCI_ROM_ADDRESS_ENABLE;
- res->flags |= IORESOURCE_ROM_ENABLE;
- }
- pci_write_config_dword(pdev, where, reg);
+ limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+ if (unlikely(*head >= limit))
+ return -EFBIG;
- /* This knows that the upper 32-bits of the address
- * must be zero. Our PCI common layer enforces this.
- */
- if (is_64bit)
- pci_write_config_dword(pdev, where + 4, 0);
+ return 0;
}
-static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
- struct resource *res,
- struct resource *root)
+static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
+ unsigned long msiqid, unsigned long *head,
+ unsigned long *msi)
{
- res->start += root->start;
- res->end += root->start;
-}
+ struct pci_sun4v_msiq_entry *ep;
+ unsigned long err, type;
-/* Use ranges property to determine where PCI MEM, I/O, and Config
- * space are for this PCI bus module.
- */
-static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
-{
- int i, saw_mem, saw_io;
-
- saw_mem = saw_io = 0;
- for (i = 0; i < pbm->num_pbm_ranges; i++) {
- struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
- unsigned long a;
- int type;
-
- type = (pr->child_phys_hi >> 24) & 0x3;
- a = (((unsigned long)pr->parent_phys_hi << 32UL) |
- ((unsigned long)pr->parent_phys_lo << 0UL));
-
- switch (type) {
- case 1:
- /* 16-bit IO space, 16MB */
- pbm->io_space.start = a;
- pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
- pbm->io_space.flags = IORESOURCE_IO;
- saw_io = 1;
- break;
+ /* Note: void pointer arithmetic, 'head' is a byte offset */
+ ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
+ (pbm->msiq_ent_count *
+ sizeof(struct pci_sun4v_msiq_entry))) +
+ *head);
- case 2:
- /* 32-bit MEM space, 2GB */
- pbm->mem_space.start = a;
- pbm->mem_space.end = a + (0x80000000UL - 1UL);
- pbm->mem_space.flags = IORESOURCE_MEM;
- saw_mem = 1;
- break;
+ if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
+ return 0;
- case 3:
- /* XXX 64-bit MEM handling XXX */
+ type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
+ if (unlikely(type != MSIQ_TYPE_MSI32 &&
+ type != MSIQ_TYPE_MSI64))
+ return -EINVAL;
- default:
- break;
- };
- }
+ *msi = ep->msi_data;
- if (!saw_io || !saw_mem) {
- prom_printf("%s: Fatal error, missing %s PBM range.\n",
- pbm->name,
- (!saw_io ? "IO" : "MEM"));
- prom_halt();
- }
+ err = pci_sun4v_msi_setstate(pbm->devhandle,
+ ep->msi_data /* msi_num */,
+ HV_MSISTATE_IDLE);
+ if (unlikely(err))
+ return -ENXIO;
+
+ /* Clear the entry. */
+ ep->version_type &= ~MSIQ_TYPE_MASK;
+
+ (*head) += sizeof(struct pci_sun4v_msiq_entry);
+ if (*head >=
+ (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
+ *head = 0;
- printk("%s: PCI IO[%lx] MEM[%lx]\n",
- pbm->name,
- pbm->io_space.start,
- pbm->mem_space.start);
+ return 1;
}
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
+static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long head)
{
- pbm->io_space.name = pbm->mem_space.name = pbm->name;
+ unsigned long err;
- request_resource(&ioport_resource, &pbm->io_space);
- request_resource(&iomem_resource, &pbm->mem_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
+ err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
+ if (unlikely(err))
+ return -EINVAL;
+
+ return 0;
}
-static void probe_existing_entries(struct pci_pbm_info *pbm,
- struct pci_iommu *iommu)
+static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long msi, int is_msi64)
{
- struct pci_iommu_arena *arena = &iommu->arena;
- unsigned long i, devhandle;
+ if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
+ (is_msi64 ?
+ HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
+ return -ENXIO;
+ if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
+ return -ENXIO;
+ if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
+ return -ENXIO;
+ return 0;
+}
- devhandle = pbm->devhandle;
- for (i = 0; i < arena->limit; i++) {
- unsigned long ret, io_attrs, ra;
+static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
+{
+ unsigned long err, msiqid;
- ret = pci_sun4v_iommu_getmap(devhandle,
- HV_PCI_TSBID(0, i),
- &io_attrs, &ra);
- if (ret == HV_EOK)
- __set_bit(i, arena->map);
+ err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
+ if (err)
+ return -ENXIO;
+
+ pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
+
+ return 0;
+}
+
+static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
+{
+ unsigned long q_size, alloc_size, pages, order;
+ int i;
+
+ q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+ alloc_size = (pbm->msiq_num * q_size);
+ order = get_order(alloc_size);
+ pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
+ if (pages == 0UL) {
+ printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
+ order);
+ return -ENOMEM;
}
+ memset((char *)pages, 0, PAGE_SIZE << order);
+ pbm->msi_queues = (void *) pages;
+
+ for (i = 0; i < pbm->msiq_num; i++) {
+ unsigned long err, base = __pa(pages + (i * q_size));
+ unsigned long ret1, ret2;
+
+ err = pci_sun4v_msiq_conf(pbm->devhandle,
+ pbm->msiq_first + i,
+ base, pbm->msiq_ent_count);
+ if (err) {
+ printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
+ err);
+ goto h_error;
+ }
+
+ err = pci_sun4v_msiq_info(pbm->devhandle,
+ pbm->msiq_first + i,
+ &ret1, &ret2);
+ if (err) {
+ printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
+ err);
+ goto h_error;
+ }
+ if (ret1 != base || ret2 != pbm->msiq_ent_count) {
+ printk(KERN_ERR "MSI: Bogus qconf "
+ "expected[%lx:%x] got[%lx:%lx]\n",
+ base, pbm->msiq_ent_count,
+ ret1, ret2);
+ goto h_error;
+ }
+ }
+
+ return 0;
+
+h_error:
+ free_pages(pages, order);
+ return -EINVAL;
}
-static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
+static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
- struct pci_iommu *iommu = pbm->iommu;
- unsigned long num_tsb_entries, sz;
- u32 vdma[2], dma_mask, dma_offset;
- int err, tsbsize;
+ unsigned long q_size, alloc_size, pages, order;
+ int i;
- err = prom_getproperty(pbm->prom_node, "virtual-dma",
- (char *)&vdma[0], sizeof(vdma));
- if (err == 0 || err == -1) {
- /* No property, use default values. */
- vdma[0] = 0x80000000;
- vdma[1] = 0x80000000;
+ for (i = 0; i < pbm->msiq_num; i++) {
+ unsigned long msiqid = pbm->msiq_first + i;
+
+ (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
}
- dma_mask = vdma[0];
- switch (vdma[1]) {
- case 0x20000000:
- dma_mask |= 0x1fffffff;
- tsbsize = 64;
- break;
+ q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+ alloc_size = (pbm->msiq_num * q_size);
+ order = get_order(alloc_size);
- case 0x40000000:
- dma_mask |= 0x3fffffff;
- tsbsize = 128;
- break;
+ pages = (unsigned long) pbm->msi_queues;
- case 0x80000000:
- dma_mask |= 0x7fffffff;
- tsbsize = 128;
- break;
+ free_pages(pages, order);
- default:
- prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
- prom_halt();
- };
+ pbm->msi_queues = NULL;
+}
- num_tsb_entries = tsbsize / sizeof(iopte_t);
+static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
+ unsigned long msiqid,
+ unsigned long devino)
+{
+ unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
- dma_offset = vdma[0];
+ if (!virt_irq)
+ return -ENOMEM;
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
- iommu->dma_addr_mask = dma_mask;
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
+ if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
+ return -EINVAL;
- /* Allocate and initialize the free area map. */
- sz = num_tsb_entries / 8;
- sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kmalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
- prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
- prom_halt();
- }
- memset(iommu->arena.map, 0, sz);
- iommu->arena.limit = num_tsb_entries;
+ return virt_irq;
+}
- probe_existing_entries(pbm, iommu);
+static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
+ .get_head = pci_sun4v_get_head,
+ .dequeue_msi = pci_sun4v_dequeue_msi,
+ .set_head = pci_sun4v_set_head,
+ .msi_setup = pci_sun4v_msi_setup,
+ .msi_teardown = pci_sun4v_msi_teardown,
+ .msiq_alloc = pci_sun4v_msiq_alloc,
+ .msiq_free = pci_sun4v_msiq_free,
+ .msiq_build_irq = pci_sun4v_msiq_build_irq,
+};
+
+static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
+{
+ sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
+}
+#else /* CONFIG_PCI_MSI */
+static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
+{
}
+#endif /* !(CONFIG_PCI_MSI) */
-static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle)
+static int __init pci_sun4v_pbm_init(struct pci_controller_info *p,
+ struct device_node *dp, u32 devhandle)
{
struct pci_pbm_info *pbm;
- unsigned int busrange[2];
- int err, i;
+ int err;
if (devhandle & 0x40)
pbm = &p->pbm_B;
else
pbm = &p->pbm_A;
+ pbm->next = pci_pbm_root;
+ pci_pbm_root = pbm;
+
+ pbm->numa_node = of_node_to_nid(dp);
+
+ pbm->pci_ops = &sun4v_pci_ops;
+ pbm->config_space_reg_bits = 12;
+
+ pbm->index = pci_num_pbms++;
+
pbm->parent = p;
- pbm->prom_node = prom_node;
- pbm->pci_first_slot = 1;
+ pbm->prom_node = dp;
pbm->devhandle = devhandle;
- sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
- p->index, (pbm == &p->pbm_A ? 'A' : 'B'));
+ pbm->name = dp->full_name;
- printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);
+ printk("%s: SUN4V PCI Bus Module\n", pbm->name);
+ printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
- prom_getstring(prom_node, "name",
- pbm->prom_name, sizeof(pbm->prom_name));
+ pci_determine_mem_io_space(pbm);
- err = prom_getproperty(prom_node, "ranges",
- (char *) pbm->pbm_ranges,
- sizeof(pbm->pbm_ranges));
- if (err == 0 || err == -1) {
- prom_printf("%s: Fatal error, no ranges property.\n",
- pbm->name);
- prom_halt();
- }
+ pci_get_pbm_props(pbm);
- pbm->num_pbm_ranges =
- (err / sizeof(struct linux_prom_pci_ranges));
+ err = pci_sun4v_iommu_init(pbm);
+ if (err)
+ return err;
- /* Mask out the top 8 bits of the ranges, leaving the real
- * physical address.
- */
- for (i = 0; i < pbm->num_pbm_ranges; i++)
- pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
-
- pci_sun4v_determine_mem_io_space(pbm);
- pbm_register_toplevel_resources(p, pbm);
-
- err = prom_getproperty(prom_node, "interrupt-map",
- (char *)pbm->pbm_intmap,
- sizeof(pbm->pbm_intmap));
- if (err != -1) {
- pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
- err = prom_getproperty(prom_node, "interrupt-map-mask",
- (char *)&pbm->pbm_intmask,
- sizeof(pbm->pbm_intmask));
- if (err == -1) {
- prom_printf("%s: Fatal error, no "
- "interrupt-map-mask.\n", pbm->name);
- prom_halt();
- }
- } else {
- pbm->num_pbm_intmap = 0;
- memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
- }
+ pci_sun4v_msi_init(pbm);
- err = prom_getproperty(prom_node, "bus-range",
- (char *)&busrange[0],
- sizeof(busrange));
- if (err == 0 || err == -1) {
- prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
- prom_halt();
- }
- pbm->pci_first_busno = busrange[0];
- pbm->pci_last_busno = busrange[1];
+ pci_sun4v_scan_bus(pbm);
- pci_sun4v_iommu_init(pbm);
+ return 0;
}
-void sun4v_pci_init(int node, char *model_name)
+static int __devinit pci_sun4v_probe(struct of_device *op,
+ const struct of_device_id *match)
{
+ const struct linux_prom64_registers *regs;
+ static int hvapi_negotiated = 0;
struct pci_controller_info *p;
- struct pci_iommu *iommu;
- struct linux_prom64_registers regs;
- unsigned int devhandle;
+ struct pci_pbm_info *pbm;
+ struct device_node *dp;
+ struct iommu *iommu;
+ u32 devhandle;
+ int i;
+
+ dp = op->node;
+
+ if (!hvapi_negotiated++) {
+ int err = sun4v_hvapi_register(HV_GRP_PCI,
+ vpci_major,
+ &vpci_minor);
+
+ if (err) {
+ printk(KERN_ERR PFX "Could not register hvapi, "
+ "err=%d\n", err);
+ return err;
+ }
+ printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
+ vpci_major, vpci_minor);
- prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
- devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;;
+ dma_ops = &sun4v_dma_ops;
+ }
- for (p = pci_controller_root; p; p = p->next) {
- struct pci_pbm_info *pbm;
+ regs = of_get_property(dp, "reg", NULL);
+ if (!regs) {
+ printk(KERN_ERR PFX "Could not find config registers\n");
+ return -ENODEV;
+ }
+ devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
- if (p->pbm_A.prom_node && p->pbm_B.prom_node)
- continue;
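+ /* Sibling PBMs of one controller have devhandles that differ
+ * only in bit 6, so finding an existing PBM at devhandle ^ 0x40
+ * means this probe is for the other half of a controller we
+ * have already seen.
+ */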
+ for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
+ if (pbm->devhandle == (devhandle ^ 0x40)) {
+ return pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
+ }
+ }
+
+ for_each_possible_cpu(i) {
+ unsigned long page = get_zeroed_page(GFP_ATOMIC);
- pbm = (p->pbm_A.prom_node ?
- &p->pbm_A :
- &p->pbm_B);
+ if (!page)
+ return -ENOMEM;
- if (pbm->devhandle == (devhandle ^ 0x40))
- pci_sun4v_pbm_init(p, node, devhandle);
+ per_cpu(iommu_batch, i).pglist = (u64 *) page;
}
- p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
+ p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
if (!p) {
- prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
- prom_halt();
+ printk(KERN_ERR PFX "Could not allocate pci_controller_info\n");
+ goto out_free;
}
- memset(p, 0, sizeof(*p));
- iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu) {
- prom_printf("SCHIZO: Fatal memory allocation error.\n");
- prom_halt();
+ printk(KERN_ERR PFX "Could not allocate pbm A iommu\n");
+ goto out_free;
}
- memset(iommu, 0, sizeof(*iommu));
+
p->pbm_A.iommu = iommu;
- iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu) {
- prom_printf("SCHIZO: Fatal memory allocation error.\n");
- prom_halt();
+ printk(KERN_ERR PFX "Could not allocate pbm B iommu\n");
+ goto out_free;
}
- memset(iommu, 0, sizeof(*iommu));
- p->pbm_B.iommu = iommu;
- p->next = pci_controller_root;
- pci_controller_root = p;
+ p->pbm_B.iommu = iommu;
- p->index = pci_num_controllers++;
- p->pbms_same_domain = 0;
+ return pci_sun4v_pbm_init(p, dp, devhandle);
- p->scan_bus = pci_sun4v_scan_bus;
- p->irq_build = pci_sun4v_irq_build;
- p->base_address_update = pci_sun4v_base_address_update;
- p->resource_adjust = pci_sun4v_resource_adjust;
- p->pci_ops = &pci_sun4v_ops;
+out_free:
+ if (p) {
+ if (p->pbm_A.iommu)
+ kfree(p->pbm_A.iommu);
+ if (p->pbm_B.iommu)
+ kfree(p->pbm_B.iommu);
+ kfree(p);
+ }
+ return -ENOMEM;
+}
- /* Like PSYCHO and SCHIZO we have a 2GB aligned area
- * for memory space.
- */
- pci_memspace_mask = 0x7fffffffUL;
+static struct of_device_id __initdata pci_sun4v_match[] = {
+ {
+ .name = "pci",
+ .compatible = "SUNW,sun4v-pci",
+ },
+ {},
+};
- pci_sun4v_pbm_init(p, node, devhandle);
+static struct of_platform_driver pci_sun4v_driver = {
+ .name = DRIVER_NAME,
+ .match_table = pci_sun4v_match,
+ .probe = pci_sun4v_probe,
+};
- prom_printf("sun4v_pci_init: Implement me.\n");
- prom_halt();
+static int __init pci_sun4v_init(void)
+{
+ return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}
+
+subsys_initcall(pci_sun4v_init);