* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_feature_sets.h>
static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
-extern int sn_force_interrupt_flag;
+int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
-static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
+struct list_head **sn_irq_lh;
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
-static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
- u64 sn_irq_info,
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
- (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+ (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
+
return ret_stuff.status;
}
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
(u64) sn_irq_info->irq_cookie, 0, 0);
}
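+/*
+ * Ask the PROM, via SAL, to retarget an already-allocated interrupt to a
+ * new nasid/slice in place. If the PROM does not support this call,
+ * sn_retarget_vector() falls back to the free/realloc path below.
+ */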
+u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
+ nasid_t req_nasid, int req_slice)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+ (u64) SAL_INTR_REDIRECT, (u64) local_nasid,
+ (u64) local_widget, __pa(sn_irq_info),
+ (u64) req_nasid, (u64) req_slice, 0);
+
+ return ret_stuff.status;
+}
+
static unsigned int sn_startup_irq(unsigned int irq)
{
return 0;
{
}
+extern void ia64_mca_register_cpev(int);
+
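+/*
+ * The only enable/disable handling needed here is for the CPE vector:
+ * disabling registers CPE vector 0 with SAL, enabling re-registers the
+ * real vector.
+ */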
static void sn_disable_irq(unsigned int irq)
{
+ if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ ia64_mca_register_cpev(0);
}
static void sn_enable_irq(unsigned int irq)
{
+ if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ ia64_mca_register_cpev(irq);
}
static void sn_ack_irq(unsigned int irq)
{
- uint64_t event_occurred, mask = 0;
- int nasid;
+ u64 event_occurred, mask;
irq = irq & 0xff;
- nasid = get_nasid();
- event_occurred =
- HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
+	event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
mask = event_occurred & SH_ALL_INT_MASK;
- HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
- mask);
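+	/* Writing the mask to the _ALIAS register clears those event bits */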
+	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
- move_irq(irq);
+ move_native_irq(irq);
}
static void sn_end_irq(unsigned int irq)
{
- int nasid;
int ivec;
- uint64_t event_occurred;
+ u64 event_occurred;
ivec = irq & 0xff;
if (ivec == SGI_UART_VECTOR) {
- nasid = get_nasid();
- event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
- (nasid, SH_EVENT_OCCURRED));
+		event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
/* If the UART bit is set here, we may have received an
* interrupt from the UART that the driver missed. To
* make sure, we IPI ourselves to force us to look again.
static void sn_irq_info_free(struct rcu_head *head);
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+ nasid_t nasid, int slice)
{
- struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
- int cpuid, cpuphys;
+ int vector;
+ int cpuid;
+#ifdef CONFIG_SMP
+ int cpuphys;
+#endif
+ int64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *pci_provider;
+
+ bridge = (u64) sn_irq_info->irq_bridge;
+ if (!bridge) {
+ return NULL; /* irq is not a device interrupt */
+ }
- cpuid = first_cpu(mask);
- cpuphys = cpu_physical_id(cpuid);
+ local_nasid = NASID_GET(bridge);
- list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
- sn_irq_lh[irq], list) {
- uint64_t bridge;
- int local_widget, status;
- nasid_t local_nasid;
- struct sn_irq_info *new_irq_info;
-
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
- if (new_irq_info == NULL)
- break;
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
- bridge = (uint64_t) new_irq_info->irq_bridge;
- if (!bridge) {
- kfree(new_irq_info);
- break; /* irq is not a device interrupt */
- }
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
+ vector = sn_irq_info->irq_irq;
- local_nasid = NASID_GET(bridge);
+ /* Make use of SAL_INTR_REDIRECT if PROM supports it */
+	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info,
+				  nasid, slice);
+ if (!status) {
+ new_irq_info = sn_irq_info;
+ goto finish_up;
+ }
- if (local_nasid & 1)
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
+ /*
+ * PROM does not support SAL_INTR_REDIRECT, or it failed.
+ * Revert to old method.
+ */
+ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ return NULL;
+
+ memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+ /* Free the old PROM new_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+ unregister_intr_pda(new_irq_info);
+
+ /* allocate a new PROM new_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ new_irq_info, vector,
+ nasid, slice);
+
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ return NULL;
+ }
- /* Free the old PROM new_irq_info structure */
- sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
- unregister_intr_pda(new_irq_info);
+ register_intr_pda(new_irq_info);
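+	/*
+	 * Swap in the new entry under the lock; list readers are protected
+	 * by RCU, and the old entry is freed after a grace period.
+	 */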
+ spin_lock(&sn_irq_info_lock);
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
- /* allocate a new PROM new_irq_info struct */
- status = sn_intr_alloc(local_nasid, local_widget,
- __pa(new_irq_info), irq,
- cpuid_to_nasid(cpuid),
- cpuid_to_slice(cpuid));
- /* SAL call failed */
- if (status) {
- kfree(new_irq_info);
- break;
- }
+finish_up:
+	/* Update the kernel's new_irq_info with new target info */
+ cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
+ new_irq_info->irq_slice);
+ new_irq_info->irq_cpuid = cpuid;
+
+ pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
+
+ /*
+ * If this represents a line interrupt, target it. If it's
+	 * an MSI (irq_int_bit < 0), it's already targeted.
+ */
+ if (new_irq_info->irq_int_bit >= 0 &&
+ pci_provider && pci_provider->target_interrupt)
+ (pci_provider->target_interrupt)(new_irq_info);
+
+#ifdef CONFIG_SMP
+ cpuphys = cpu_physical_id(cpuid);
+ set_irq_affinity_info((vector & 0xff), cpuphys, 0);
+#endif
- new_irq_info->irq_cpuid = cpuid;
- register_intr_pda(new_irq_info);
+ return new_irq_info;
+}
- if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
- pcibr_change_devices_irq(new_irq_info);
+static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+{
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ nasid_t nasid;
+ int slice;
+
+ nasid = cpuid_to_nasid(cpumask_first(mask));
+ slice = cpuid_to_slice(cpumask_first(mask));
- spin_lock(&sn_irq_info_lock);
- list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
- spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list)
+ (void)sn_retarget_vector(sn_irq_info, nasid, slice);
+
+ return 0;
+}
#ifdef CONFIG_SMP
- set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+void sn_set_err_irq_affinity(unsigned int irq)
+{
+ /*
+ * On systems which support CPU disabling (SHub2), all error interrupts
+	 * are targeted at the boot CPU.
+ */
+ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
+ set_irq_affinity_info(irq, cpu_physical_id(0), 0);
+}
+#else
+void sn_set_err_irq_affinity(unsigned int irq) { }
#endif
- }
+
+static void
+sn_mask_irq(unsigned int irq)
+{
+}
+
+static void
+sn_unmask_irq(unsigned int irq)
+{
}
-struct hw_interrupt_type irq_type_sn = {
- .typename = "SN hub",
+struct irq_chip irq_type_sn = {
+ .name = "SN hub",
.startup = sn_startup_irq,
.shutdown = sn_shutdown_irq,
.enable = sn_enable_irq,
.disable = sn_disable_irq,
.ack = sn_ack_irq,
.end = sn_end_irq,
+ .mask = sn_mask_irq,
+ .unmask = sn_unmask_irq,
.set_affinity = sn_set_affinity_irq
};
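+/* On SN platforms the hardware irq number is used directly as the vector */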
+ia64_vector sn_irq_to_vector(int irq)
+{
+ if (irq >= IA64_NUM_VECTORS)
+ return 0;
+ return (ia64_vector)irq;
+}
+
unsigned int sn_local_vector_to_irq(u8 vector)
{
return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
int i;
irq_desc_t *base_desc = irq_desc;
+ ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+ ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
for (i = 0; i < NR_IRQS; i++) {
- if (base_desc[i].handler == &no_irq_type) {
- base_desc[i].handler = &irq_type_sn;
+ if (base_desc[i].chip == &no_irq_type) {
+ base_desc[i].chip = &irq_type_sn;
}
}
}
pdacpu(cpu)->sn_last_irq = irq;
}
- if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+ if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
pdacpu(cpu)->sn_first_irq = irq;
- }
}
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
nasid_t nasid = sn_irq_info->irq_nasid;
int slice = sn_irq_info->irq_slice;
int cpu = nasid_slice_to_cpuid(nasid, slice);
+#ifdef CONFIG_SMP
+ int cpuphys;
+ irq_desc_t *desc;
+#endif
pci_dev_get(pci_dev);
-
sn_irq_info->irq_cpuid = cpu;
sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
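+	/* Keep the ia64 vector allocator from handing this vector out again */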
+ reserve_irq_vector(sn_irq_info->irq_irq);
spin_unlock(&sn_irq_info_lock);
- (void)register_intr_pda(sn_irq_info);
+ register_intr_pda(sn_irq_info);
+#ifdef CONFIG_SMP
+ cpuphys = cpu_physical_id(cpu);
+ set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
+ desc = irq_to_desc(sn_irq_info->irq_irq);
+ /*
+	 * Affinity was set by the PROM; prevent it from
+ * being reset by the request_irq() path.
+ */
+ desc->status |= IRQ_AFFINITY_SET;
+#endif
}
void sn_irq_unfixup(struct pci_dev *pci_dev)
return;
sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
- if (!sn_irq_info || !sn_irq_info->irq_irq)
+ if (!sn_irq_info)
+ return;
+ if (!sn_irq_info->irq_irq) {
+ kfree(sn_irq_info);
return;
+ }
unregister_intr_pda(sn_irq_info);
spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock);
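+	/* Drop the vector reservation once no device is using this irq */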
+ if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+ free_irq_vector(sn_irq_info->irq_irq);
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
-
pci_dev_put(pci_dev);
+}
+
+static inline void
+sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
+{
+ struct sn_pcibus_provider *pci_provider;
+
+ pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
+
+ /* Don't force an interrupt if the irq has been disabled */
+ if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
+ pci_provider && pci_provider->force_interrupt)
+ (*pci_provider->force_interrupt)(sn_irq_info);
}
static void force_interrupt(int irq)
return;
rcu_read_lock();
- list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
- if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
- (sn_irq_info->irq_bridge != NULL))
- pcibr_force_interrupt(sn_irq_info);
- }
+ list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
+ sn_call_force_intr_provider(sn_irq_info);
+
rcu_read_unlock();
}
*/
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
- uint64_t regval;
- int irr_reg_num;
- int irr_bit;
- uint64_t irr_reg;
+ u64 regval;
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
+ /*
+ * Bridge types attached to TIO (anything but PIC) do not need this WAR
+ * since they do not target Shub II interrupt registers. If that
+	 * ever changes, this check will need to accommodate it.
+ */
+ if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
+ return;
+
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (!pcidev_info)
return;
pdi_pcibus_info;
regval = pcireg_intr_status_get(pcibus_info);
- irr_reg_num = irq_to_vector(irq) / 64;
- irr_bit = irq_to_vector(irq) % 64;
- switch (irr_reg_num) {
- case 0:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
- break;
- case 1:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
- break;
- case 2:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
- break;
- case 3:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
- break;
- }
- if (!test_bit(irr_bit, &irr_reg)) {
- if (!test_bit(irq, pda->sn_soft_irr)) {
- if (!test_bit(irq, pda->sn_in_service_ivecs)) {
- regval &= 0xff;
- if (sn_irq_info->irq_int_bit & regval &
- sn_irq_info->irq_last_intr) {
- regval &=
- ~(sn_irq_info->
- irq_int_bit & regval);
- pcibr_force_interrupt(sn_irq_info);
- }
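+	/*
+	 * Only force the interrupt if it is neither pending in the IRR
+	 * nor currently being serviced.
+	 */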
+ if (!ia64_get_irr(irq_to_vector(irq))) {
+ if (!test_bit(irq, pda->sn_in_service_ivecs)) {
+ regval &= 0xff;
+ if (sn_irq_info->irq_int_bit & regval &
+ sn_irq_info->irq_last_intr) {
+ regval &= ~(sn_irq_info->irq_int_bit & regval);
+ sn_call_force_intr_provider(sn_irq_info);
}
}
}
rcu_read_lock();
for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
- /*
- * Only call for PCI bridges that are fully
- * initialized.
- */
- if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
- (sn_irq_info->irq_bridge != NULL))
- sn_check_intr(i, sn_irq_info);
+ sn_check_intr(i, sn_irq_info);
}
}
rcu_read_unlock();
}
-void sn_irq_lh_init(void)
+void __init sn_irq_lh_init(void)
{
int i;
INIT_LIST_HEAD(sn_irq_lh[i]);
}
-
}