#include "intel-iommu.h"
#include <asm/proto.h> /* force_iommu is declared in this header on x86-64 */
#include <asm/cacheflush.h>
-#include <asm/iommu.h>
+#include <asm/gart.h>
#include "pci.h"
#define IS_GFX_DEVICE(pdev) (((pdev)->class >> 16) == PCI_BASE_CLASS_DISPLAY)
DMA_TLB_PSI_FLUSH, non_present_entry_flush);
}
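+/*
+ * Disable the DMA protected memory regions set up by firmware before
+ * handing those ranges over to translation: clearing PMEN.EPM switches
+ * the protected regions off, and PMEN.PRS reports when the hardware
+ * has actually dropped them.
+ */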
+static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+{
+ u32 pmen;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+ pmen = readl(iommu->reg + DMAR_PMEN_REG);
+ pmen &= ~DMA_PMEN_EPM;
+ writel(pmen, iommu->reg + DMAR_PMEN_REG);
+
+ /* wait for the protected region status bit to clear */
+ IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
+ readl, !(pmen & DMA_PMEN_PRS), pmen);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
static int iommu_enable_translation(struct intel_iommu *iommu)
{
u32 sts;
/* iommu interrupt handling. Most of it is MSI-like. */
-static char *fault_reason_strings[] =
+static const char *fault_reason_strings[] =
{
"Software",
"Present bit in root entry is clear",
"non-zero reserved fields in RTP",
"non-zero reserved fields in CTP",
"non-zero reserved fields in PTE",
- "Unknown"
};
-#define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings)
+#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
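+/*
+ * Out-of-range fault reasons are reported as "Unknown" instead of
+ * indexing one past the end of fault_reason_strings.
+ */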
-char *dmar_get_fault_reason(u8 fault_reason)
+const char *dmar_get_fault_reason(u8 fault_reason)
{
if (fault_reason > MAX_FAULT_REASON_IDX)
- return fault_reason_strings[MAX_FAULT_REASON_IDX];
+ return "Unknown";
else
return fault_reason_strings[fault_reason];
}
static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
u8 fault_reason, u16 source_id, u64 addr)
{
- char *reason;
+ const char *reason;
reason = dmar_get_fault_reason(fault_reason);
return iommu;
error_unmap:
iounmap(iommu->reg);
- iommu->reg = 0;
error:
kfree(iommu);
return NULL;
int i;
u64 addr, size;
- init_iova_domain(&reserved_iova_list);
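+ /* the iova allocator now takes its 32-bit pfn boundary explicitly */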
+ init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad);
+ init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
spin_lock_init(&domain->mapping_lock);
domain_reserve_special_ranges(domain);
iommu_flush_context_global(iommu, 0);
iommu_flush_iotlb_global(iommu, 0);
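+ /*
+ * Tear down any BIOS-established protected memory regions before
+ * translation takes over.
+ */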
+ iommu_disable_protect_mem_regions(iommu);
+
ret = iommu_enable_translation(iommu);
if (ret)
goto error;
/*
* First try to allocate an io virtual address in
* DMA_32BIT_MASK and if that fails then try allocating
- * from higer range
+ * from higher range
*/
iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
if (!iova)
if (!domain) {
printk(KERN_ERR
"Allocating domain for %s failed", pci_name(pdev));
- return 0;
+ return NULL;
}
/* make sure context mapping is ok */
printk(KERN_ERR
"Domain context map for %s failed",
pci_name(pdev));
- return 0;
+ return NULL;
}
}
free_pages((unsigned long)vaddr, order);
}
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
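+/* sg_virt() also copes with the page-link encoding of chained scatterlists */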
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, int dir)
{
int i;
struct iova *iova;
size_t size = 0;
void *addr;
+ struct scatterlist *sg;
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return;
domain = find_domain(pdev);
- iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address));
+ iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
if (!iova)
return;
- for (i = 0; i < nelems; i++, sg++) {
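+ /* sum the aligned size of every segment to find how much to unmap */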
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
size += aligned_size((u64)addr, sg->length);
}
}
static int intel_nontranslate_map_sg(struct device *hwdev,
- struct scatterlist *sg, int nelems, int dir)
+ struct scatterlist *sglist, int nelems, int dir)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nelems; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
- s->dma_length = s->length;
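+ /* no translation: hand back the bus address of each segment */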
+ for_each_sg(sglist, sg, nelems, i) {
+ BUG_ON(!sg_page(sg));
+ sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+ sg->dma_length = sg->length;
}
return nelems;
}
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
+ int nelems, int dir)
{
void *addr;
int i;
size_t offset = 0;
struct iova *iova = NULL;
int ret;
- struct scatterlist *orig_sg = sg;
+ struct scatterlist *sg;
unsigned long start_addr;
BUG_ON(dir == DMA_NONE);
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+ return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
if (!domain)
return 0;
- for (i = 0; i < nelems; i++, sg++) {
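+ /* first pass: total the aligned size of all segments for a single iova allocation */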
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
addr = (void *)virt_to_phys(addr);
size += aligned_size((u64)addr, sg->length);
iova = __intel_alloc_iova(hwdev, domain, size);
if (!iova) {
- orig_sg->dma_length = 0;
+ sglist->dma_length = 0;
return 0;
}
start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
offset = 0;
- sg = orig_sg;
- for (i = 0; i < nelems; i++, sg++) {
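+ /* second pass: map each segment at consecutive offsets within the iova range */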
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
addr = (void *)virt_to_phys(addr);
size = aligned_size((u64)addr, sg->length);