-/* $Id: ioport.c,v 1.45 2001/10/30 04:54:21 davem Exp $
+/*
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* <zaitcev> Sounds reasonable
*/
-#include <linux/config.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pci.h> /* struct pci_dev */
#include <linux/proc_fs.h>
+#include <linux/scatterlist.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
+#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
+#include <asm/iommu.h>
+#include <asm/io-unit.h>
+
+#include "dma.h"
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
-struct resource *_sparc_find_resource(struct resource *r, unsigned long);
+static struct resource *_sparc_find_resource(struct resource *r,
+ unsigned long);
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);
+static void register_proc_sparc_ioport(void);
+
/* This tracks the next virtual address range to use for DVMA mappings */
static struct resource _sparc_dvma = {
.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
-/*
- */
-void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset,
- unsigned long size, char *name)
+void __iomem *of_ioremap(struct resource *res, unsigned long offset,
+ unsigned long size, char *name)
{
- return _sparc_alloc_io(phyres->flags & 0xF,
- phyres->start + offset, size, name);
+ return _sparc_alloc_io(res->flags & 0xF,
+ res->start + offset,
+ size, name);
}
+EXPORT_SYMBOL(of_ioremap);
-/*
- */
-void sbus_iounmap(volatile void __iomem *addr, unsigned long size)
+void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
- iounmap(addr);
+ iounmap(base);
}
+EXPORT_SYMBOL(of_iounmap);
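+/*
+ * Typical driver usage (a sketch, not taken from this patch; "op" is
+ * the probing driver's of_device and the resource index is illustrative):
+ *
+ *	struct resource *rp = &op->resource[0];
+ *	void __iomem *regs;
+ *
+ *	regs = of_ioremap(rp, 0, rp->end - rp->start + 1, "mydev-regs");
+ *	if (regs) {
+ *		...
+ *		of_iounmap(rp, regs, rp->end - rp->start + 1);
+ *	}
+ */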
/*
* Meat of mapping
pa &= PAGE_MASK;
sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);
- return (void __iomem *) (res->start + offset);
+ return (void __iomem *)(unsigned long)(res->start + offset);
}
/*
unsigned long plen;
plen = res->end - res->start + 1;
- if ((plen & (PAGE_SIZE-1)) != 0) BUG();
+ BUG_ON((plen & (PAGE_SIZE-1)) != 0);
sparc_unmapiorange(res->start, plen);
release_resource(res);
}
#ifdef CONFIG_SBUS
-void sbus_set_sbus64(struct sbus_dev *sdev, int x) {
+void sbus_set_sbus64(struct device *dev, int x)
+{
printk("sbus_set_sbus64: unsupported\n");
}
* Allocate a chunk of memory suitable for DMA.
* Typically devices use them for control blocks.
* CPU may access them without any explicit flushing.
- *
- * XXX Some clever people know that sdev is not used and supply NULL. Watch.
*/
-void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
+void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
+ struct of_device *op = to_of_device(dev);
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long va;
struct resource *res;
int order;
- /* XXX why are some lenghts signed, others unsigned? */
+ /* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return NULL;
}
if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
goto err_nopages;
- if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
+ if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
goto err_nomem;
- memset((char*)res, 0, sizeof(struct resource));
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
* XXX That's where sdev would be used. Currently we load
* all iommu tables with the same translations.
*/
- if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
+ if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
goto err_noiommu;
- return (void *)res->start;
+ res->name = op->node->name;
+
+ return (void *)(unsigned long)res->start;
err_noiommu:
release_resource(res);
return NULL;
}
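+/*
+ * Hedged sketch of the intended pairing with sbus_free_consistent()
+ * below (the names and the page-sized length are illustrative only):
+ *
+ *	u32 dvma;
+ *	void *cpu = sbus_alloc_consistent(&op->dev, PAGE_SIZE, &dvma);
+ *
+ *	if (cpu != NULL) {
+ *		... hand "dvma" to the device, touch "cpu" freely ...
+ *		sbus_free_consistent(&op->dev, PAGE_SIZE, cpu, dvma);
+ *	}
+ */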
-void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
+void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
struct resource *res;
struct page *pgv;
kfree(res);
/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
- pgv = mmu_translate_dvma(ba);
- mmu_unmap_dma_area(ba, n);
+ pgv = virt_to_page(p);
+ mmu_unmap_dma_area(dev, ba, n);
__free_pages(pgv, get_order(n));
}
* CPU view of this memory may be inconsistent with
* a device view and explicit flushing is necessary.
*/
-dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
+dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
- /* XXX why are some lenghts signed, others unsigned? */
+ /* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
}
if (len > 256*1024) { /* __get_free_pages() limit */
return 0;
}
- return mmu_get_scsi_one(va, len, sdev->bus);
+ return mmu_get_scsi_one(dev, va, len);
}
-void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
+void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
- mmu_release_scsi_one(ba, n, sdev->bus);
+ mmu_release_scsi_one(dev, ba, n);
}
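+/*
+ * Streaming sketch (assumption: SBUS_DMA_TODEVICE is the usual sbus
+ * direction constant; "buf" and "len" are hypothetical):
+ *
+ *	dma_addr_t ba = sbus_map_single(&op->dev, buf, len, SBUS_DMA_TODEVICE);
+ *
+ *	if (ba != 0) {
+ *		... device reads from "ba" ...
+ *		sbus_unmap_single(&op->dev, ba, len, SBUS_DMA_TODEVICE);
+ *	}
+ */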
-int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
+int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
- mmu_get_scsi_sgl(sg, n, sdev->bus);
+ mmu_get_scsi_sgl(dev, sg, n);
/*
* XXX sparc64 can return a partial length here. sun4c should do this
return n;
}
-void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
+void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
- mmu_release_scsi_sgl(sg, n, sdev->bus);
+ mmu_release_scsi_sgl(dev, sg, n);
}
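+/*
+ * Scatter-gather sketch (hypothetical names; note the map call here
+ * always returns its "n" argument, per the XXX above):
+ *
+ *	if (sbus_map_sg(&op->dev, sgl, nents, SBUS_DMA_FROMDEVICE) == nents) {
+ *		... run the transfer ...
+ *		sbus_unmap_sg(&op->dev, sgl, nents, SBUS_DMA_FROMDEVICE);
+ *	}
+ */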
-/*
- */
-void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
+void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
-#if 0
- unsigned long va;
- struct resource *res;
-
- /* We do not need the resource, just print a message if invalid. */
- res = _sparc_find_resource(&_sparc_dvma, ba);
- if (res == NULL)
- panic("sbus_dma_sync_single: 0x%x\n", ba);
-
- va = page_address(mmu_translate_dvma(ba)); /* XXX higmem */
- /*
- * XXX This bogosity will be fixed with the iommu rewrite coming soon
- * to a kernel near you. - Anton
- */
- /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
-#endif
}
-void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
+void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
-#if 0
- unsigned long va;
- struct resource *res;
-
- /* We do not need the resource, just print a message if invalid. */
- res = _sparc_find_resource(&_sparc_dvma, ba);
- if (res == NULL)
- panic("sbus_dma_sync_single: 0x%x\n", ba);
-
- va = page_address(mmu_translate_dvma(ba)); /* XXX higmem */
- /*
- * XXX This bogosity will be fixed with the iommu rewrite coming soon
- * to a kernel near you. - Anton
- */
- /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
-#endif
}
-void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
-{
-	printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
-}
-
-void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
-{
-	printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
-}
+static int __init sparc_register_ioport(void)
+{
+	register_proc_sparc_ioport();
+
+	return 0;
+}
+
+arch_initcall(sparc_register_ioport);
+
#endif /* CONFIG_SBUS */
#ifdef CONFIG_PCI
return NULL;
}
- if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
+ if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
free_pages(va, order);
printk("pci_alloc_consistent: no core\n");
return NULL;
}
- memset((char*)res, 0, sizeof(struct resource));
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
* size must be the same as what was passed into pci_alloc_consistent,
* and likewise dma_addr must be the same as what *dma_addrp was set to.
*
- * References to the memory and mappings assosciated with cpu_addr/dma_addr
+ * References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
return virt_to_phys(ptr);
}
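+/*
+ * Because IIep is write-through, a single mapping is just the buffer's
+ * physical address. A round trip looks like (sketch, hypothetical names):
+ *
+ *	dma_addr_t ba = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
+ *	... device writes into the buffer ...
+ *	pci_unmap_single(pdev, ba, len, PCI_DMA_FROMDEVICE);
+ */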
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
void pci_unmap_page(struct pci_dev *hwdev,
dma_addr_t dma_address, size_t size, int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
/* mmu_inval_dma_area XXX */
}
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
int direction)
{
+ struct scatterlist *sg;
int n;
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
- for (n = 0; n < nents; n++) {
- if (page_address(sg->page) == NULL) BUG();
- sg->dvma_address = virt_to_phys(page_address(sg->page));
+ for_each_sg(sgl, sg, nents, n) {
+ BUG_ON(page_address(sg_page(sg)) == NULL);
+ sg->dvma_address = virt_to_phys(sg_virt(sg));
sg->dvma_length = sg->length;
- sg++;
}
return nents;
}
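+/*
+ * The for_each_sg() loop above replaces the old "sg++" walk; a caller
+ * reads the addresses back the same way (sketch, "program_hw" is
+ * hypothetical):
+ *
+ *	struct scatterlist *sg;
+ *	int i;
+ *
+ *	pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
+ *	for_each_sg(sgl, sg, nents, i)
+ *		program_hw(sg->dvma_address, sg->dvma_length);
+ */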
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
int direction)
{
+ struct scatterlist *sg;
int n;
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
- if (page_address(sg->page) == NULL) BUG();
+ for_each_sg(sgl, sg, nents, n) {
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
*/
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
+ struct scatterlist *sg;
int n;
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
- if (page_address(sg->page) == NULL) BUG();
+ for_each_sg(sgl, sg, nents, n) {
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
+ struct scatterlist *sg;
int n;
- if (direction == PCI_DMA_NONE)
- BUG();
+ BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
- if (page_address(sg->page) == NULL) BUG();
+ for_each_sg(sgl, sg, nents, n) {
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
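+/*
+ * Sync usage sketch (hypothetical names): hand a buffer back and forth
+ * between CPU and device without remapping it:
+ *
+ *	pci_dma_sync_single_for_cpu(pdev, ba, len, PCI_DMA_FROMDEVICE);
+ *	... CPU inspects the data ...
+ *	pci_dma_sync_single_for_device(pdev, ba, len, PCI_DMA_FROMDEVICE);
+ */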
if (p + 32 >= e) /* Better than nothing */
break;
if ((nm = r->name) == 0) nm = "???";
- p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
+ p += sprintf(p, "%016llx-%016llx: %s\n",
+ (unsigned long long)r->start,
+ (unsigned long long)r->end, nm);
}
return p-buf;
* XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
* This probably warrants some sort of hashing.
*/
-struct resource *
-_sparc_find_resource(struct resource *root, unsigned long hit)
+static struct resource *_sparc_find_resource(struct resource *root,
+ unsigned long hit)
{
struct resource *tmp;
return NULL;
}
-void register_proc_sparc_ioport(void)
+static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);