2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
11 #include <linux/module.h>
12 #include <linux/rbtree.h>
13 #include <linux/irq.h>
16 #include <asm/uv/uv_irq.h>
17 #include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode{
/* Serializes all access to uv_irq_root and the nodes linked into it. */
static spinlock_t uv_irq_lock;
/* Root of the irq -> (MMR offset, pnode) rbtree, keyed by irq number. */
static struct rb_root uv_irq_root;
/*
 * Stub for irq_chip callbacks that require no action on UV.
 * NOTE(review): body not visible in this chunk -- presumably empty; confirm.
 */
static void uv_noop(unsigned int irq)
/*
 * Stub like uv_noop() but with a return value, for callbacks such as
 * .startup that must return a status (see uv_irq_chip below).
 * NOTE(review): body not visible in this chunk -- presumably returns 0; confirm.
 */
static unsigned int uv_noop_ret(unsigned int irq)
/*
 * Ack callback for the UV irq_chip; name suggests it issues an APIC EOI.
 * NOTE(review): body not visible in this chunk -- confirm against full file.
 */
static void uv_ack_apic(unsigned int irq)
/*
 * irq_chip used for UV-sourced interrupts.  Only part of the initializer
 * is visible in this chunk; unneeded callbacks are wired to the no-op
 * stubs above.
 */
struct irq_chip uv_irq_chip = {
	.startup = uv_noop_ret,
	.set_affinity = uv_set_irq_affinity,
58 * Add offset and pnode information of the hub sourcing interrupts to the
59 * rb tree for a specific irq.
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
	struct rb_node **link = &uv_irq_root.rb_node;	/* insertion point cursor */
	struct rb_node *parent = NULL;			/* parent of insertion point */
	struct uv_irq_2_mmr_pnode *n;			/* new node to insert */
	struct uv_irq_2_mmr_pnode *e;			/* existing node during walk */
	unsigned long irqflags;
	/*
	 * Allocate the node on the blade's memory node.
	 * NOTE(review): the NULL check after this call is not visible in
	 * this chunk -- confirm it exists in the full file.
	 */
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
		uv_blade_to_memory_nid(blade));
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
		if (unlikely(irq == e->irq)) {
			/* irq entry exists: update it in place instead of inserting */
			e->pnode = uv_blade_to_pnode(blade);
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			/* descend left or right depending on key comparison
			 * (the comparison line itself is not visible in this chunk) */
			link = &(*link)->rb_left;
			link = &(*link)->rb_right;
	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
/*
 * Retrieve offset and pnode information from the rb tree for a specific irq.
 * On a hit, fills *offset and *pnode from the cached entry.
 * NOTE(review): the tree-walk loop and return statements are not visible in
 * this chunk -- return convention (0 on success?) should be confirmed.
 */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;
	/* Walk the rbtree under the lock; 'n' is declared in a line not shown here. */
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
			/* hit: unlock and return the cached info */
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	/* miss: irq not in the tree */
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
	unsigned long mmr_offset, int restrict)
	/*
	 * NOTE(review): "restrict" is a C99/C11 keyword; this compiles only
	 * under gnu89-style dialects.  Consider renaming (e.g. "limit").
	 */
	/* Allocate an irq number, presumably above the legacy range, on the
	 * blade's memory node. */
	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
	/* Record irq -> (offset, pnode) so uv_irq_2_mmr_info()/teardown can find it. */
	uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
EXPORT_SYMBOL_GPL(uv_setup_irq);
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 * NOTE(review): the sentence above looks stale -- this function takes only
 * 'irq' and looks the rest up in the rbtree; confirm and update.
 */
void uv_teardown_irq(unsigned int irq)
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;
	/* Walk the rbtree under the lock; 'n' is declared in a line not shown here. */
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
			/* Found it: disable the MMR and unlink the cached entry.
			 * NOTE(review): the kfree of 'e' is not visible in this
			 * chunk -- confirm the entry is freed after rb_erase. */
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
EXPORT_SYMBOL_GPL(uv_teardown_irq);