x86: SGI UV: Fix irq affinity for hub based interrupts
arch/x86/kernel/uv_irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};

static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;

/* Forward declaration: the affinity handler referenced by uv_irq_chip below */
static int uv_set_irq_affinity(unsigned int, const struct cpumask *);

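/*
 * Stub handlers for the irq_chip callbacks that need no hub-side action:
 * only the EOI hook (uv_ack_apic, which acks the local APIC) and the
 * set_affinity hook do real work.
 */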
static void uv_noop(unsigned int irq)
{
}

static unsigned int uv_noop_ret(unsigned int irq)
{
	return 0;
}

static void uv_ack_apic(unsigned int irq)
{
	ack_APIC_irq();
}

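/* irq_chip used for UV hub sourced interrupts; most operations are no-ops */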
struct irq_chip uv_irq_chip = {
	.name		= "UV-CORE",
	.startup	= uv_noop_ret,
	.shutdown	= uv_noop,
	.enable		= uv_noop,
	.disable	= uv_noop,
	.ack		= uv_noop,
	.mask		= uv_noop,
	.unmask		= uv_noop,
	.eoi		= uv_ack_apic,
	.end		= uv_noop,
	.set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 *
 * Returns the assigned Linux irq number on success, otherwise the error
 * returned by arch_enable_uv_irq() (or -EBUSY if no irq could be allocated).
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int restrict)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
		restrict);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode recorded by uv_setup_irq() are looked up in the
 * rb tree, so only the irq returned by uv_setup_irq() needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
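
For context, here is a minimal sketch of how a driver might consume this interface, loosely following the pattern used by SGI's hub drivers: allocate a hub interrupt with uv_setup_irq(), register a handler with request_irq(), and undo both on teardown. The handler, the EXAMPLE_MMR_OFFSET macro, and the choice of UV_AFFINITY_CPU as the restrict argument are illustrative assumptions, not part of this file.

#include <linux/interrupt.h>
#include <asm/uv/uv_irq.h>

/* Illustrative handler; a real driver would service its hub event here. */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_irq;

/* Allocate and wire up a hub interrupt targeted at @cpu on @blade. */
static int example_attach(int cpu, int blade)
{
	int irq;

	irq = uv_setup_irq("UV-EXAMPLE", cpu, blade, EXAMPLE_MMR_OFFSET,
			   UV_AFFINITY_CPU);
	if (irq < 0)
		return irq;

	if (request_irq(irq, example_handler, 0, "UV-EXAMPLE", NULL)) {
		uv_teardown_irq(irq);
		return -EBUSY;
	}
	example_irq = irq;
	return 0;
}

/* Release the handler, then disable the hub MMR and free the irq. */
static void example_detach(void)
{
	free_irq(example_irq, NULL);
	uv_teardown_irq(example_irq);
}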