X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fpci%2Fmsi.c;h=77b68eaf021e43152c4bbd257bd190a4a210473c;hb=89713422a768458a0d375f0c2f3586cd5ccde6a1;hp=944e45e4a84f369636aad2f537c4f04c3855ccde;hpb=f598282f5145036312d90875d0ed5c14b49fd8a7;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 944e45e..77b68ea 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -16,9 +16,9 @@ #include #include #include - -#include -#include +#include +#include +#include #include "pci.h" #include "msi.h" @@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control) * reliably as devices without an INTx disable bit will then generate a * level IRQ which will never be cleared. */ -static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { u32 mask_bits = desc->masked; if (!desc->msi_attrib.maskbit) - return; + return 0; mask_bits &= ~mask; mask_bits |= flag; pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); - desc->masked = mask_bits; + + return mask_bits; +} + +static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +{ + desc->masked = __msi_mask_irq(desc, mask, flag); } /* @@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) * file. This saves a few milliseconds when initialising devices with lots * of MSI-X interrupts. */ -static void msix_mask_irq(struct msi_desc *desc, u32 flag) +static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag) { u32 mask_bits = desc->masked; unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; + PCI_MSIX_ENTRY_VECTOR_CTRL; mask_bits &= ~1; mask_bits |= flag; writel(mask_bits, desc->mask_base + offset); - desc->masked = mask_bits; + + return mask_bits; +} + +static void msix_mask_irq(struct msi_desc *desc, u32 flag) +{ + desc->masked = __msix_mask_irq(desc, flag); } static void msi_set_mask_bit(unsigned irq, u32 flag) @@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void __iomem *base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; - msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); - msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); + msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); + msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); + msg->data = readl(base + PCI_MSIX_ENTRY_DATA); } else { struct pci_dev *dev = entry->dev; int pos = entry->msi_attrib.pos; @@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; - writel(msg->address_lo, - base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - writel(msg->address_hi, - base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); - writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); + writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msg->data, base + PCI_MSIX_ENTRY_DATA); } else { struct pci_dev *dev = entry->dev; int pos = entry->msi_attrib.pos; @@ -262,7 +272,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg) write_msi_msg_desc(desc, msg); } -static int msi_free_irqs(struct pci_dev* dev); +static void free_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *entry, *tmp; + + list_for_each_entry(entry, &dev->msi_list, list) { + int i, 
nvec; + if (!entry->irq) + continue; + nvec = 1 << entry->msi_attrib.multiple; + for (i = 0; i < nvec; i++) + BUG_ON(irq_has_action(entry->irq + i)); + } + + arch_teardown_msi_irqs(dev); + + list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { + if (entry->msi_attrib.is_msix) { + if (list_is_last(&entry->list, &dev->msi_list)) + iounmap(entry->mask_base); + } + list_del(&entry->list); + kfree(entry); + } +} static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) { @@ -314,7 +347,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev) if (!dev->msix_enabled) return; BUG_ON(list_empty(&dev->msi_list)); - entry = list_entry(dev->msi_list.next, struct msi_desc, list); + entry = list_first_entry(&dev->msi_list, struct msi_desc, list); pos = entry->msi_attrib.pos; pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); @@ -357,7 +390,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) u16 control; unsigned mask; - pos = pci_find_capability(dev, PCI_CAP_ID_MSI); + pos = pci_find_capability(dev, PCI_CAP_ID_MSI); msi_set_enable(dev, pos, 0); /* Disable MSI during set up */ pci_read_config_word(dev, msi_control_reg(pos), &control); @@ -366,12 +399,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) if (!entry) return -ENOMEM; - entry->msi_attrib.is_msix = 0; - entry->msi_attrib.is_64 = is_64bit_address(control); - entry->msi_attrib.entry_nr = 0; - entry->msi_attrib.maskbit = is_mask_bit_support(control); - entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ - entry->msi_attrib.pos = pos; + entry->msi_attrib.is_msix = 0; + entry->msi_attrib.is_64 = is_64bit_address(control); + entry->msi_attrib.entry_nr = 0; + entry->msi_attrib.maskbit = is_mask_bit_support(control); + entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ + entry->msi_attrib.pos = pos; entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64); /* All MSIs are unmasked by default, Mask them all */ @@ -385,7 +418,8 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) /* Configure MSI capability structure */ ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) { - msi_free_irqs(dev); + msi_mask_irq(entry, mask, ~mask); + free_msi_irqs(dev); return ret; } @@ -398,6 +432,70 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) return 0; } +static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos, + unsigned nr_entries) +{ + unsigned long phys_addr; + u32 table_offset; + u8 bir; + + pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); + bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); + table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; + phys_addr = pci_resource_start(dev, bir) + table_offset; + + return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); +} + +static int msix_setup_entries(struct pci_dev *dev, unsigned pos, + void __iomem *base, struct msix_entry *entries, + int nvec) +{ + struct msi_desc *entry; + int i; + + for (i = 0; i < nvec; i++) { + entry = alloc_msi_entry(dev); + if (!entry) { + if (!i) + iounmap(base); + else + free_msi_irqs(dev); + /* No enough memory. 
Don't try again */ + return -ENOMEM; + } + + entry->msi_attrib.is_msix = 1; + entry->msi_attrib.is_64 = 1; + entry->msi_attrib.entry_nr = entries[i].entry; + entry->msi_attrib.default_irq = dev->irq; + entry->msi_attrib.pos = pos; + entry->mask_base = base; + + list_add_tail(&entry->list, &dev->msi_list); + } + + return 0; +} + +static void msix_program_entries(struct pci_dev *dev, + struct msix_entry *entries) +{ + struct msi_desc *entry; + int i = 0; + + list_for_each_entry(entry, &dev->msi_list, list) { + int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; + + entries[i].vector = entry->irq; + set_irq_msi(entry->irq, entry); + entry->masked = readl(entry->mask_base + offset); + msix_mask_irq(entry, 1); + i++; + } +} + /** * msix_capability_init - configure device's MSI-X capability * @dev: pointer to the pci_dev data structure of MSI-X device function @@ -411,15 +509,11 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec) { - struct msi_desc *entry; - int pos, i, j, nr_entries, ret; - unsigned long phys_addr; - u32 table_offset; - u16 control; - u8 bir; + int pos, ret; + u16 control; void __iomem *base; - pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); /* Ensure MSI-X is disabled while it is set up */ @@ -427,51 +521,17 @@ static int msix_capability_init(struct pci_dev *dev, pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); /* Request & Map MSI-X table region */ - nr_entries = multi_msix_capable(control); - - pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); - bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); - table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; - phys_addr = pci_resource_start (dev, bir) + table_offset; - base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); - if (base == NULL) + base = msix_map_region(dev, pos, multi_msix_capable(control)); + if (!base) return -ENOMEM; - for (i = 0; i < nvec; i++) { - entry = alloc_msi_entry(dev); - if (!entry) - break; - - j = entries[i].entry; - entry->msi_attrib.is_msix = 1; - entry->msi_attrib.is_64 = 1; - entry->msi_attrib.entry_nr = j; - entry->msi_attrib.default_irq = dev->irq; - entry->msi_attrib.pos = pos; - entry->mask_base = base; - - list_add_tail(&entry->list, &dev->msi_list); - } + ret = msix_setup_entries(dev, pos, base, entries, nvec); + if (ret) + return ret; ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); - if (ret < 0) { - /* If we had some success report the number of irqs - * we succeeded in setting up. 
*/ - int avail = 0; - list_for_each_entry(entry, &dev->msi_list, list) { - if (entry->irq != 0) { - avail++; - } - } - - if (avail != 0) - ret = avail; - } - - if (ret) { - msi_free_irqs(dev); - return ret; - } + if (ret) + goto error; /* * Some devices require MSI-X to be enabled before we can touch the @@ -481,16 +541,7 @@ static int msix_capability_init(struct pci_dev *dev, control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); - i = 0; - list_for_each_entry(entry, &dev->msi_list, list) { - entries[i].vector = entry->irq; - set_irq_msi(entry->irq, entry); - j = entries[i].entry; - entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); - msix_mask_irq(entry, 1); - i++; - } + msix_program_entries(dev, entries); /* Set MSI-X enabled bits and unmask the function */ pci_intx_for_msi(dev, 0); @@ -500,6 +551,27 @@ static int msix_capability_init(struct pci_dev *dev, pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); return 0; + +error: + if (ret < 0) { + /* + * If we had some success, report the number of irqs + * we succeeded in setting up. + */ + struct msi_desc *entry; + int avail = 0; + + list_for_each_entry(entry, &dev->msi_list, list) { + if (entry->irq != 0) + avail++; + } + if (avail != 0) + ret = avail; + } + + free_msi_irqs(dev); + + return ret; } /** @@ -512,7 +584,7 @@ static int msix_capability_init(struct pci_dev *dev, * to determine if MSI/-X are supported for the device. If MSI/-X is * supported return 0, else return an error code. **/ -static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) +static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type) { struct pci_bus *bus; int ret; @@ -529,8 +601,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) if (nvec < 1) return -ERANGE; - /* Any bridge which does NOT route MSI transactions from it's - * secondary bus to it's primary bus must set NO_MSI flag on + /* + * Any bridge which does NOT route MSI transactions from its + * secondary bus to its primary bus must set NO_MSI flag on * the secondary pci_bus. * We expect only arch-specific PCI host bus controller driver * or quirks for specific PCI bridges to be setting NO_MSI. 
@@ -611,62 +684,26 @@ void pci_msi_shutdown(struct pci_dev *dev) pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; + /* Return the device with MSI unmasked as initial states */ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl); mask = msi_capable_mask(ctrl); - msi_mask_irq(desc, mask, ~mask); + /* Keep cached state to be restored */ + __msi_mask_irq(desc, mask, ~mask); /* Restore dev->irq to its default pin-assertion irq */ dev->irq = desc->msi_attrib.default_irq; } -void pci_disable_msi(struct pci_dev* dev) +void pci_disable_msi(struct pci_dev *dev) { - struct msi_desc *entry; - if (!pci_msi_enable || !dev || !dev->msi_enabled) return; pci_msi_shutdown(dev); - - entry = list_entry(dev->msi_list.next, struct msi_desc, list); - if (entry->msi_attrib.is_msix) - return; - - msi_free_irqs(dev); + free_msi_irqs(dev); } EXPORT_SYMBOL(pci_disable_msi); -static int msi_free_irqs(struct pci_dev* dev) -{ - struct msi_desc *entry, *tmp; - - list_for_each_entry(entry, &dev->msi_list, list) { - int i, nvec; - if (!entry->irq) - continue; - nvec = 1 << entry->msi_attrib.multiple; - for (i = 0; i < nvec; i++) - BUG_ON(irq_has_action(entry->irq + i)); - } - - arch_teardown_msi_irqs(dev); - - list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { - if (entry->msi_attrib.is_msix) { - writel(1, entry->mask_base + entry->msi_attrib.entry_nr - * PCI_MSIX_ENTRY_SIZE - + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); - - if (list_is_last(&entry->list, &dev->msi_list)) - iounmap(entry->mask_base); - } - list_del(&entry->list); - kfree(entry); - } - - return 0; -} - /** * pci_msix_table_size - return the number of device's MSI-X table entries * @dev: pointer to the pci_dev data structure of MSI-X device function @@ -699,13 +736,13 @@ int pci_msix_table_size(struct pci_dev *dev) * of irqs or MSI-X vectors available. Driver should use the returned value to * re-send its request. 
**/ -int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) +int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) { int status, nr_entries; int i, j; if (!entries) - return -EINVAL; + return -EINVAL; status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); if (status) @@ -727,7 +764,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) WARN_ON(!!dev->msix_enabled); /* Check whether driver already requested for MSI irq */ - if (dev->msi_enabled) { + if (dev->msi_enabled) { dev_info(&dev->dev, "can't enable MSI-X " "(MSI IRQ already assigned)\n"); return -EINVAL; @@ -737,28 +774,31 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) } EXPORT_SYMBOL(pci_enable_msix); -static void msix_free_all_irqs(struct pci_dev *dev) +void pci_msix_shutdown(struct pci_dev *dev) { - msi_free_irqs(dev); -} + struct msi_desc *entry; -void pci_msix_shutdown(struct pci_dev* dev) -{ if (!pci_msi_enable || !dev || !dev->msix_enabled) return; + /* Return the device with MSI-X masked as initial states */ + list_for_each_entry(entry, &dev->msi_list, list) { + /* Keep cached states to be restored */ + __msix_mask_irq(entry, 1); + } + msix_set_enable(dev, 0); pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; } -void pci_disable_msix(struct pci_dev* dev) + +void pci_disable_msix(struct pci_dev *dev) { if (!pci_msi_enable || !dev || !dev->msix_enabled) return; pci_msix_shutdown(dev); - - msix_free_all_irqs(dev); + free_msi_irqs(dev); } EXPORT_SYMBOL(pci_disable_msix); @@ -771,16 +811,13 @@ EXPORT_SYMBOL(pci_disable_msix); * allocated for this device function, are reclaimed to unused state, * which may be used later on. **/ -void msi_remove_pci_irq_vectors(struct pci_dev* dev) +void msi_remove_pci_irq_vectors(struct pci_dev *dev) { if (!pci_msi_enable || !dev) - return; - - if (dev->msi_enabled) - msi_free_irqs(dev); + return; - if (dev->msix_enabled) - msix_free_all_irqs(dev); + if (dev->msi_enabled || dev->msix_enabled) + free_msi_irqs(dev); } void pci_no_msi(void)
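
For reference, the pci_enable_msix() kernel-doc above states that a positive return value reports how many interrupts or MSI-X vectors could have been allocated, and that the driver should re-send its request using that value. Below is a minimal, hypothetical driver-side sketch of that usage pattern. It is not part of this patch; the foo_* names, FOO_MAX_VECTORS and the handler body are illustrative assumptions.

/*
 * Hypothetical driver-side usage sketch -- not part of this patch.
 * It follows the retry semantics described in the pci_enable_msix()
 * kernel-doc above: a positive return value is the number of vectors
 * that could have been allocated, so the driver retries with that
 * smaller count.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>

#define FOO_MAX_VECTORS	8	/* assumed per-device maximum */

struct foo_dev {
	struct pci_dev *pdev;
	struct msix_entry entries[FOO_MAX_VECTORS];
	int nvec;
};

static irqreturn_t foo_msix_handler(int irq, void *data)
{
	/* acknowledge and handle the event signalled on this vector */
	return IRQ_HANDLED;
}

static int foo_enable_msix(struct foo_dev *foo)
{
	int i, ret;

	foo->nvec = FOO_MAX_VECTORS;
	for (i = 0; i < foo->nvec; i++)
		foo->entries[i].entry = i;	/* MSI-X table slot to use */

	/* Retry with the reported maximum until success or hard failure. */
	for (;;) {
		ret = pci_enable_msix(foo->pdev, foo->entries, foo->nvec);
		if (ret == 0)
			break;			/* entries[i].vector now valid */
		if (ret < 0)
			return ret;		/* fall back to MSI or INTx */
		foo->nvec = ret;		/* fewer vectors are available */
	}

	for (i = 0; i < foo->nvec; i++) {
		ret = request_irq(foo->entries[i].vector, foo_msix_handler,
				  0, "foo-msix", foo);
		if (ret)
			goto err_free_irqs;
	}
	return 0;

err_free_irqs:
	while (--i >= 0)
		free_irq(foo->entries[i].vector, foo);
	pci_disable_msix(foo->pdev);
	return ret;
}

static void foo_disable_msix(struct foo_dev *foo)
{
	int i;

	/* Free handlers first, then undo pci_enable_msix(). */
	for (i = 0; i < foo->nvec; i++)
		free_irq(foo->entries[i].vector, foo);
	pci_disable_msix(foo->pdev);
}

On teardown the driver releases its handlers before calling pci_disable_msix(); that disable path is what this patch reworks into pci_msix_shutdown(), which re-masks each vector via __msix_mask_irq(), followed by free_msi_irqs().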