ipmi: run to completion fixes
drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
84
85 /* Bit for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR     0x01
87 #define IPMI_BMC_EVT_MSG_INTR     0x02
88 #define IPMI_BMC_EVT_MSG_BUFF     0x04
89 #define IPMI_BMC_SYS_LOG          0x08
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2,
100         SI_DISABLE_INTERRUPTS1,
101         SI_DISABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
109
110 enum si_type {
111     SI_KCS, SI_SMIC, SI_BT
112 };
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
114
115 #define DEVICE_NAME "ipmi_si"
116
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
123 struct smi_info
124 {
125         int                    intf_num;
126         ipmi_smi_t             intf;
127         struct si_sm_data      *si_sm;
128         struct si_sm_handlers  *handlers;
129         enum si_type           si_type;
130         spinlock_t             si_lock;
131         spinlock_t             msg_lock;
132         struct list_head       xmit_msgs;
133         struct list_head       hp_xmit_msgs;
134         struct ipmi_smi_msg    *curr_msg;
135         enum si_intf_state     si_state;
136
137         /* Used to handle the various types of I/O that can occur with
138            IPMI */
139         struct si_sm_io io;
140         int (*io_setup)(struct smi_info *info);
141         void (*io_cleanup)(struct smi_info *info);
142         int (*irq_setup)(struct smi_info *info);
143         void (*irq_cleanup)(struct smi_info *info);
144         unsigned int io_size;
145         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146         void (*addr_source_cleanup)(struct smi_info *info);
147         void *addr_source_data;
148
149         /* Per-OEM handler, called from handle_flags().
150            Returns 1 when handle_flags() needs to be re-run
151            or 0 indicating it set si_state itself.
152         */
153         int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
155         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156            is set to hold the flags until we are done handling everything
157            from the flags. */
158 #define RECEIVE_MSG_AVAIL       0x01
159 #define EVENT_MSG_BUFFER_FULL   0x02
160 #define WDT_PRE_TIMEOUT_INT     0x08
161 #define OEM0_DATA_AVAIL     0x20
162 #define OEM1_DATA_AVAIL     0x40
163 #define OEM2_DATA_AVAIL     0x80
164 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
165                              OEM1_DATA_AVAIL | \
166                              OEM2_DATA_AVAIL)
167         unsigned char       msg_flags;
168
169         /* If set to true, this will request events the next time the
170            state machine is idle. */
171         atomic_t            req_events;
172
173         /* If true, run the state machine to completion on every send
174            call.  Generally used after a panic to make sure stuff goes
175            out. */
176         int                 run_to_completion;
177
178         /* The I/O port of an SI interface. */
179         int                 port;
180
181         /* The space between start addresses of the two ports.  For
182            instance, if the first port is 0xca2 and the spacing is 4, then
183            the second port is 0xca6. */
184         unsigned int        spacing;
185
186         /* zero if no irq; */
187         int                 irq;
188
189         /* The timer for this si. */
190         struct timer_list   si_timer;
191
192         /* The time (in jiffies) the last timeout occurred at. */
193         unsigned long       last_timeout_jiffies;
194
195         /* Used to gracefully stop the timer without race conditions. */
196         atomic_t            stop_operation;
197
198         /* The driver will disable interrupts when it gets into a
199            situation where it cannot handle messages due to lack of
200            memory.  Once that situation clears up, it will re-enable
201            interrupts. */
202         int interrupt_disabled;
203
204         /* From the get device id response... */
205         struct ipmi_device_id device_id;
206
207         /* Driver model stuff. */
208         struct device *dev;
209         struct platform_device *pdev;
210
211          /* True if we allocated the device, false if it came from
212           * someplace else (like PCI). */
213         int dev_registered;
214
215         /* Slave address, could be reported from DMI. */
216         unsigned char slave_addr;
217
218         /* Counters and things for the proc filesystem. */
219         spinlock_t count_lock;
220         unsigned long short_timeouts;
221         unsigned long long_timeouts;
222         unsigned long timeout_restarts;
223         unsigned long idles;
224         unsigned long interrupts;
225         unsigned long attentions;
226         unsigned long flag_fetches;
227         unsigned long hosed_count;
228         unsigned long complete_transactions;
229         unsigned long events;
230         unsigned long watchdog_pretimeouts;
231         unsigned long incoming_messages;
232
233         struct task_struct *thread;
234
235         struct list_head link;
236 };
237
238 #define SI_MAX_PARMS 4
239
240 static int force_kipmid[SI_MAX_PARMS];
241 static int num_force_kipmid;
242
243 static int unload_when_empty = 1;
244
245 static int try_smi_init(struct smi_info *smi);
246 static void cleanup_one_si(struct smi_info *to_clean);
247
248 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
249 static int register_xaction_notifier(struct notifier_block * nb)
250 {
251         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
252 }
253
254 static void deliver_recv_msg(struct smi_info *smi_info,
255                              struct ipmi_smi_msg *msg)
256 {
257         /* Deliver the message to the upper layer with the lock
258            released. */
259         spin_unlock(&(smi_info->si_lock));
260         ipmi_smi_msg_received(smi_info->intf, msg);
261         spin_lock(&(smi_info->si_lock));
262 }
263
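/* Turn the current (failed) message into an error response with the
   given completion code and hand it back to the upper layer. */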
264 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
265 {
266         struct ipmi_smi_msg *msg = smi_info->curr_msg;
267
268         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
269                 cCode = IPMI_ERR_UNSPECIFIED;
270         /* else use it as is */
271
272         /* Make it a response */
273         msg->rsp[0] = msg->data[0] | 4;
274         msg->rsp[1] = msg->data[1];
275         msg->rsp[2] = cCode;
276         msg->rsp_size = 3;
277
278         smi_info->curr_msg = NULL;
279         deliver_recv_msg(smi_info, msg);
280 }
281
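/* Pull the next message off the transmit queues (high-priority queue
   first) and start its transaction.  Returns SI_SM_IDLE if there is
   nothing to send.  Called with the SMI lock held and interrupts
   off. */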
282 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
283 {
284         int              rv;
285         struct list_head *entry = NULL;
286 #ifdef DEBUG_TIMING
287         struct timeval t;
288 #endif
289
290         /* No need to save flags, we already have interrupts off and we
291            already hold the SMI lock. */
292         spin_lock(&(smi_info->msg_lock));
293
294         /* Pick the high priority queue first. */
295         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
296                 entry = smi_info->hp_xmit_msgs.next;
297         } else if (!list_empty(&(smi_info->xmit_msgs))) {
298                 entry = smi_info->xmit_msgs.next;
299         }
300
301         if (!entry) {
302                 smi_info->curr_msg = NULL;
303                 rv = SI_SM_IDLE;
304         } else {
305                 int err;
306
307                 list_del(entry);
308                 smi_info->curr_msg = list_entry(entry,
309                                                 struct ipmi_smi_msg,
310                                                 link);
311 #ifdef DEBUG_TIMING
312                 do_gettimeofday(&t);
313                 printk("**Start2: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
314 #endif
315                 err = atomic_notifier_call_chain(&xaction_notifier_list,
316                                 0, smi_info);
317                 if (err & NOTIFY_STOP_MASK) {
318                         rv = SI_SM_CALL_WITHOUT_DELAY;
319                         goto out;
320                 }
321                 err = smi_info->handlers->start_transaction(
322                         smi_info->si_sm,
323                         smi_info->curr_msg->data,
324                         smi_info->curr_msg->data_size);
325                 if (err) {
326                         return_hosed_msg(smi_info, err);
327                 }
328
329                 rv = SI_SM_CALL_WITHOUT_DELAY;
330         }
331         out:
332         spin_unlock(&(smi_info->msg_lock));
333
334         return rv;
335 }
336
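/* Start the two-step sequence that turns on the BMC's message
   interrupts: fetch the global enables here, the set is issued from
   the SI_ENABLE_INTERRUPTS1 state in handle_transaction_done(). */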
337 static void start_enable_irq(struct smi_info *smi_info)
338 {
339         unsigned char msg[2];
340
341         /* If we are enabling interrupts, we have to tell the
342            BMC to use them. */
343         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345
346         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
347         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
348 }
349
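/* Same two-step sequence as above, but used to turn the BMC's
   message interrupts off (see SI_DISABLE_INTERRUPTS1/2). */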
350 static void start_disable_irq(struct smi_info *smi_info)
351 {
352         unsigned char msg[2];
353
354         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
355         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
356
357         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
358         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
359 }
360
361 static void start_clear_flags(struct smi_info *smi_info)
362 {
363         unsigned char msg[3];
364
365         /* Make sure the watchdog pre-timeout flag is not set at startup. */
366         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
367         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
368         msg[2] = WDT_PRE_TIMEOUT_INT;
369
370         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
371         smi_info->si_state = SI_CLEARING_FLAGS;
372 }
373
374 /* When we have a situation where we run out of memory and cannot
375    allocate messages, we just leave them in the BMC and run the system
376    polled until we can allocate some memory.  Once we have some
377    memory, we will re-enable the interrupt. */
378 static inline void disable_si_irq(struct smi_info *smi_info)
379 {
380         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
381                 start_disable_irq(smi_info);
382                 smi_info->interrupt_disabled = 1;
383         }
384 }
385
386 static inline void enable_si_irq(struct smi_info *smi_info)
387 {
388         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
389                 start_enable_irq(smi_info);
390                 smi_info->interrupt_disabled = 0;
391         }
392 }
393
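/* Act on the message flags fetched from the BMC, in priority order:
   watchdog pre-timeout, received messages, event buffer full, then
   any OEM-specific data.  Called with the SMI lock held. */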
394 static void handle_flags(struct smi_info *smi_info)
395 {
396  retry:
397         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398                 /* Watchdog pre-timeout */
399                 spin_lock(&smi_info->count_lock);
400                 smi_info->watchdog_pretimeouts++;
401                 spin_unlock(&smi_info->count_lock);
402
403                 start_clear_flags(smi_info);
404                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
405                 spin_unlock(&(smi_info->si_lock));
406                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
407                 spin_lock(&(smi_info->si_lock));
408         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
409                 /* Messages available. */
410                 smi_info->curr_msg = ipmi_alloc_smi_msg();
411                 if (!smi_info->curr_msg) {
412                         disable_si_irq(smi_info);
413                         smi_info->si_state = SI_NORMAL;
414                         return;
415                 }
416                 enable_si_irq(smi_info);
417
418                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
420                 smi_info->curr_msg->data_size = 2;
421
422                 smi_info->handlers->start_transaction(
423                         smi_info->si_sm,
424                         smi_info->curr_msg->data,
425                         smi_info->curr_msg->data_size);
426                 smi_info->si_state = SI_GETTING_MESSAGES;
427         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
428                 /* Events available. */
429                 smi_info->curr_msg = ipmi_alloc_smi_msg();
430                 if (!smi_info->curr_msg) {
431                         disable_si_irq(smi_info);
432                         smi_info->si_state = SI_NORMAL;
433                         return;
434                 }
435                 enable_si_irq(smi_info);
436
437                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
438                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
439                 smi_info->curr_msg->data_size = 2;
440
441                 smi_info->handlers->start_transaction(
442                         smi_info->si_sm,
443                         smi_info->curr_msg->data,
444                         smi_info->curr_msg->data_size);
445                 smi_info->si_state = SI_GETTING_EVENTS;
446         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447                    smi_info->oem_data_avail_handler) {
448                 if (smi_info->oem_data_avail_handler(smi_info))
449                         goto retry;
450         } else {
451                 smi_info->si_state = SI_NORMAL;
452         }
453 }
454
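/* Called when the state machine reports SI_SM_TRANSACTION_COMPLETE.
   Collects the result of the finished transaction and advances
   si_state (delivering responses, clearing flags, enabling or
   disabling interrupts) as appropriate. */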
455 static void handle_transaction_done(struct smi_info *smi_info)
456 {
457         struct ipmi_smi_msg *msg;
458 #ifdef DEBUG_TIMING
459         struct timeval t;
460
461         do_gettimeofday(&t);
462         printk("**Done: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
463 #endif
464         switch (smi_info->si_state) {
465         case SI_NORMAL:
466                 if (!smi_info->curr_msg)
467                         break;
468
469                 smi_info->curr_msg->rsp_size
470                         = smi_info->handlers->get_result(
471                                 smi_info->si_sm,
472                                 smi_info->curr_msg->rsp,
473                                 IPMI_MAX_MSG_LENGTH);
474
475                 /* Do this here because deliver_recv_msg() releases the
476                    lock, and a new message can be put in during the
477                    time the lock is released. */
478                 msg = smi_info->curr_msg;
479                 smi_info->curr_msg = NULL;
480                 deliver_recv_msg(smi_info, msg);
481                 break;
482
483         case SI_GETTING_FLAGS:
484         {
485                 unsigned char msg[4];
486                 unsigned int  len;
487
488                 /* We got the flags from the SMI, now handle them. */
489                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490                 if (msg[2] != 0) {
491                         /* Error fetching flags, just give up for
492                            now. */
493                         smi_info->si_state = SI_NORMAL;
494                 } else if (len < 4) {
495                         /* Hmm, no flags.  That's technically illegal, but
496                            don't use uninitialized data. */
497                         smi_info->si_state = SI_NORMAL;
498                 } else {
499                         smi_info->msg_flags = msg[3];
500                         handle_flags(smi_info);
501                 }
502                 break;
503         }
504
505         case SI_CLEARING_FLAGS:
506         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
507         {
508                 unsigned char msg[3];
509
510                 /* We cleared the flags. */
511                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
512                 if (msg[2] != 0) {
513                         /* Error clearing flags */
514                         printk(KERN_WARNING
515                                "ipmi_si: Error clearing flags: %2.2x\n",
516                                msg[2]);
517                 }
518                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
519                         start_enable_irq(smi_info);
520                 else
521                         smi_info->si_state = SI_NORMAL;
522                 break;
523         }
524
525         case SI_GETTING_EVENTS:
526         {
527                 smi_info->curr_msg->rsp_size
528                         = smi_info->handlers->get_result(
529                                 smi_info->si_sm,
530                                 smi_info->curr_msg->rsp,
531                                 IPMI_MAX_MSG_LENGTH);
532
533                 /* Do this here because deliver_recv_msg() releases the
534                    lock, and a new message can be put in during the
535                    time the lock is released. */
536                 msg = smi_info->curr_msg;
537                 smi_info->curr_msg = NULL;
538                 if (msg->rsp[2] != 0) {
539                         /* Error getting event, probably done. */
540                         msg->done(msg);
541
542                         /* Take off the event flag. */
543                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544                         handle_flags(smi_info);
545                 } else {
546                         spin_lock(&smi_info->count_lock);
547                         smi_info->events++;
548                         spin_unlock(&smi_info->count_lock);
549
550                         /* Do this before we deliver the message
551                            because delivering the message releases the
552                            lock and something else can mess with the
553                            state. */
554                         handle_flags(smi_info);
555
556                         deliver_recv_msg(smi_info, msg);
557                 }
558                 break;
559         }
560
561         case SI_GETTING_MESSAGES:
562         {
563                 smi_info->curr_msg->rsp_size
564                         = smi_info->handlers->get_result(
565                                 smi_info->si_sm,
566                                 smi_info->curr_msg->rsp,
567                                 IPMI_MAX_MSG_LENGTH);
568
569                 /* Do this here because deliver_recv_msg() releases the
570                    lock, and a new message can be put in during the
571                    time the lock is released. */
572                 msg = smi_info->curr_msg;
573                 smi_info->curr_msg = NULL;
574                 if (msg->rsp[2] != 0) {
575                         /* Error getting message, probably done. */
576                         msg->done(msg);
577
578                         /* Take off the msg flag. */
579                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580                         handle_flags(smi_info);
581                 } else {
582                         spin_lock(&smi_info->count_lock);
583                         smi_info->incoming_messages++;
584                         spin_unlock(&smi_info->count_lock);
585
586                         /* Do this before we deliver the message
587                            because delivering the message releases the
588                            lock and something else can mess with the
589                            state. */
590                         handle_flags(smi_info);
591
592                         deliver_recv_msg(smi_info, msg);
593                 }
594                 break;
595         }
596
597         case SI_ENABLE_INTERRUPTS1:
598         {
599                 unsigned char msg[4];
600
601                 /* We got the flags from the SMI, now handle them. */
602                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603                 if (msg[2] != 0) {
604                         printk(KERN_WARNING
605                                "ipmi_si: Could not enable interrupts"
606                                ", failed get, using polled mode.\n");
607                         smi_info->si_state = SI_NORMAL;
608                 } else {
609                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
610                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
611                         msg[2] = (msg[3] |
612                                   IPMI_BMC_RCV_MSG_INTR |
613                                   IPMI_BMC_EVT_MSG_INTR);
614                         smi_info->handlers->start_transaction(
615                                 smi_info->si_sm, msg, 3);
616                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
617                 }
618                 break;
619         }
620
621         case SI_ENABLE_INTERRUPTS2:
622         {
623                 unsigned char msg[4];
624
625                 /* The response to the set-enables command; check for errors. */
626                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
627                 if (msg[2] != 0) {
628                         printk(KERN_WARNING
629                                "ipmi_si: Could not enable interrupts"
630                                ", failed set, using polled mode.\n");
631                 }
632                 smi_info->si_state = SI_NORMAL;
633                 break;
634         }
635
636         case SI_DISABLE_INTERRUPTS1:
637         {
638                 unsigned char msg[4];
639
640                 /* We got the flags from the SMI, now handle them. */
641                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
642                 if (msg[2] != 0) {
643                         printk(KERN_WARNING
644                                "ipmi_si: Could not disable interrupts"
645                                ", failed get.\n");
646                         smi_info->si_state = SI_NORMAL;
647                 } else {
648                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
649                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
650                         msg[2] = (msg[3] &
651                                   ~(IPMI_BMC_RCV_MSG_INTR |
652                                     IPMI_BMC_EVT_MSG_INTR));
653                         smi_info->handlers->start_transaction(
654                                 smi_info->si_sm, msg, 3);
655                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
656                 }
657                 break;
658         }
659
660         case SI_DISABLE_INTERRUPTS2:
661         {
662                 unsigned char msg[4];
663
664                 /* The response to the set-enables command; check for errors. */
665                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
666                 if (msg[2] != 0) {
667                         printk(KERN_WARNING
668                                "ipmi_si: Could not disable interrupts"
669                                ", failed set.\n");
670                 }
671                 smi_info->si_state = SI_NORMAL;
672                 break;
673         }
674         }
675 }
676
677 /* Called on timeouts and events.  Timeouts should pass the elapsed
678    time, interrupts should pass in zero.  Must be called with
679    si_lock held and interrupts disabled. */
680 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
681                                            int time)
682 {
683         enum si_sm_result si_sm_result;
684
685  restart:
686         /* There used to be a loop here that waited a little while
687            (around 25us) before giving up.  That turned out to be
688            pointless, the minimum delays I was seeing were in the 300us
689            range, which is far too long to wait in an interrupt.  So
690            we just run until the state machine tells us something
691            happened or it needs a delay. */
692         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
693         time = 0;
694         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
695         {
696                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
697         }
698
699         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
700         {
701                 spin_lock(&smi_info->count_lock);
702                 smi_info->complete_transactions++;
703                 spin_unlock(&smi_info->count_lock);
704
705                 handle_transaction_done(smi_info);
706                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
707         }
708         else if (si_sm_result == SI_SM_HOSED)
709         {
710                 spin_lock(&smi_info->count_lock);
711                 smi_info->hosed_count++;
712                 spin_unlock(&smi_info->count_lock);
713
714                 /* Do this before return_hosed_msg, because that
715                    releases the lock. */
716                 smi_info->si_state = SI_NORMAL;
717                 if (smi_info->curr_msg != NULL) {
718                         /* If we were handling a user message, format
719                            a response to send to the upper layer to
720                            tell it about the error. */
721                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
722                 }
723                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
724         }
725
726         /*
727          * We prefer handling attn over new messages.  But don't do
728          * this if there is not yet an upper layer to handle anything.
729          */
730         if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN)
731         {
732                 unsigned char msg[2];
733
734                 spin_lock(&smi_info->count_lock);
735                 smi_info->attentions++;
736                 spin_unlock(&smi_info->count_lock);
737
738                 /* Got an attn, send down a get message flags to see
739                    what's causing it.  It would be better to handle
740                    this in the upper layer, but due to the way
741                    interrupts work with the SMI, that's not really
742                    possible. */
743                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
744                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
745
746                 smi_info->handlers->start_transaction(
747                         smi_info->si_sm, msg, 2);
748                 smi_info->si_state = SI_GETTING_FLAGS;
749                 goto restart;
750         }
751
752         /* If we are currently idle, try to start the next message. */
753         if (si_sm_result == SI_SM_IDLE) {
754                 spin_lock(&smi_info->count_lock);
755                 smi_info->idles++;
756                 spin_unlock(&smi_info->count_lock);
757
758                 si_sm_result = start_next_msg(smi_info);
759                 if (si_sm_result != SI_SM_IDLE)
760                         goto restart;
761         }
762
763         if ((si_sm_result == SI_SM_IDLE)
764             && (atomic_read(&smi_info->req_events)))
765         {
766                 /* We are idle and the upper layer requested that I fetch
767                    events, so do so. */
768                 atomic_set(&smi_info->req_events, 0);
769
770                 smi_info->curr_msg = ipmi_alloc_smi_msg();
771                 if (!smi_info->curr_msg)
772                         goto out;
773
774                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
775                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
776                 smi_info->curr_msg->data_size = 2;
777
778                 smi_info->handlers->start_transaction(
779                         smi_info->si_sm,
780                         smi_info->curr_msg->data,
781                         smi_info->curr_msg->data_size);
782                 smi_info->si_state = SI_GETTING_EVENTS;
783                 goto restart;
784         }
785  out:
786         return si_sm_result;
787 }
788
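/* The .sender entry point used by the message handler.  Queues the
   message and kicks the state machine; in run-to-completion mode the
   transaction is instead driven synchronously right here. */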
789 static void sender(void                *send_info,
790                    struct ipmi_smi_msg *msg,
791                    int                 priority)
792 {
793         struct smi_info   *smi_info = send_info;
794         enum si_sm_result result;
795         unsigned long     flags;
796 #ifdef DEBUG_TIMING
797         struct timeval    t;
798 #endif
799
800         if (atomic_read(&smi_info->stop_operation)) {
801                 msg->rsp[0] = msg->data[0] | 4;
802                 msg->rsp[1] = msg->data[1];
803                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
804                 msg->rsp_size = 3;
805                 deliver_recv_msg(smi_info, msg);
806                 return;
807         }
808
809 #ifdef DEBUG_TIMING
810         do_gettimeofday(&t);
811         printk("**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
812 #endif
813
814         if (smi_info->run_to_completion) {
815                 /*
816                  * If we are running to completion, then throw it in
817                  * the list and run transactions until everything is
818                  * clear.  Priority doesn't matter here.
819                  */
820
821                 /*
822                  * Run to completion means we are single-threaded, no
823                  * need for locks.
824                  */
825                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
826
827                 result = smi_event_handler(smi_info, 0);
828                 while (result != SI_SM_IDLE) {
829                         udelay(SI_SHORT_TIMEOUT_USEC);
830                         result = smi_event_handler(smi_info,
831                                                    SI_SHORT_TIMEOUT_USEC);
832                 }
833                 return;
834         }
835
836         spin_lock_irqsave(&smi_info->msg_lock, flags);
837         if (priority > 0)
838                 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
839         else
840                 list_add_tail(&msg->link, &smi_info->xmit_msgs);
841         spin_unlock_irqrestore(&smi_info->msg_lock, flags);
842
843         spin_lock_irqsave(&smi_info->si_lock, flags);
844         if ((smi_info->si_state == SI_NORMAL)
845             && (smi_info->curr_msg == NULL))
846         {
847                 start_next_msg(smi_info);
848         }
849         spin_unlock_irqrestore(&smi_info->si_lock, flags);
850 }
851
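/* Switch run-to-completion (polled) mode on or off; used around a
   panic.  When enabling it, drive the state machine until it goes
   idle so nothing is left pending. */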
852 static void set_run_to_completion(void *send_info, int i_run_to_completion)
853 {
854         struct smi_info   *smi_info = send_info;
855         enum si_sm_result result;
856
857         smi_info->run_to_completion = i_run_to_completion;
858         if (i_run_to_completion) {
859                 result = smi_event_handler(smi_info, 0);
860                 while (result != SI_SM_IDLE) {
861                         udelay(SI_SHORT_TIMEOUT_USEC);
862                         result = smi_event_handler(smi_info,
863                                                    SI_SHORT_TIMEOUT_USEC);
864                 }
865         }
866 }
867
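/* The kipmiN kernel thread, used mainly for interfaces without an
   interrupt (see smi_start_processing()).  It repeatedly runs the
   state machine, yielding more or less aggressively depending on
   what the state machine asks for. */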
868 static int ipmi_thread(void *data)
869 {
870         struct smi_info *smi_info = data;
871         unsigned long flags;
872         enum si_sm_result smi_result;
873
874         set_user_nice(current, 19);
875         while (!kthread_should_stop()) {
876                 spin_lock_irqsave(&(smi_info->si_lock), flags);
877                 smi_result = smi_event_handler(smi_info, 0);
878                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
879                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
880                         /* do nothing */
881                 }
882                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
883                         schedule();
884                 else
885                         schedule_timeout_interruptible(1);
886         }
887         return 0;
888 }
889
890
891 static void poll(void *send_info)
892 {
893         struct smi_info *smi_info = send_info;
894         unsigned long flags;
895
896         /*
897          * Make sure there is some delay in the poll loop so we can
898          * drive time forward and timeout things.
899          */
900         udelay(10);
901         spin_lock_irqsave(&smi_info->si_lock, flags);
902         smi_event_handler(smi_info, 10);
903         spin_unlock_irqrestore(&smi_info->si_lock, flags);
904 }
905
906 static void request_events(void *send_info)
907 {
908         struct smi_info *smi_info = send_info;
909
910         if (atomic_read(&smi_info->stop_operation))
911                 return;
912
913         atomic_set(&smi_info->req_events, 1);
914 }
915
916 static int initialized;
917
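/* The timer that periodically drives the state machine, passing in
   the time that elapsed since the last run.  It re-arms itself with
   a short timeout if the state machine asked for a delay, otherwise
   with the normal 10ms period (always the long period when an
   interrupt is in use). */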
918 static void smi_timeout(unsigned long data)
919 {
920         struct smi_info   *smi_info = (struct smi_info *) data;
921         enum si_sm_result smi_result;
922         unsigned long     flags;
923         unsigned long     jiffies_now;
924         long              time_diff;
925 #ifdef DEBUG_TIMING
926         struct timeval    t;
927 #endif
928
929         spin_lock_irqsave(&(smi_info->si_lock), flags);
930 #ifdef DEBUG_TIMING
931         do_gettimeofday(&t);
932         printk("**Timer: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
933 #endif
934         jiffies_now = jiffies;
935         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
936                      * SI_USEC_PER_JIFFY);
937         smi_result = smi_event_handler(smi_info, time_diff);
938
939         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
940
941         smi_info->last_timeout_jiffies = jiffies_now;
942
943         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
944                 /* Running with interrupts, only do long timeouts. */
945                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
946                 spin_lock_irqsave(&smi_info->count_lock, flags);
947                 smi_info->long_timeouts++;
948                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
949                 goto do_add_timer;
950         }
951
952         /* If the state machine asks for a short delay, then shorten
953            the timer timeout. */
954         if (smi_result == SI_SM_CALL_WITH_DELAY) {
955                 spin_lock_irqsave(&smi_info->count_lock, flags);
956                 smi_info->short_timeouts++;
957                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
958                 smi_info->si_timer.expires = jiffies + 1;
959         } else {
960                 spin_lock_irqsave(&smi_info->count_lock, flags);
961                 smi_info->long_timeouts++;
962                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
963                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
964         }
965
966  do_add_timer:
967         add_timer(&(smi_info->si_timer));
968 }
969
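/* Interrupt handler: just run the state machine with no elapsed
   time. */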
970 static irqreturn_t si_irq_handler(int irq, void *data)
971 {
972         struct smi_info *smi_info = data;
973         unsigned long   flags;
974 #ifdef DEBUG_TIMING
975         struct timeval  t;
976 #endif
977
978         spin_lock_irqsave(&(smi_info->si_lock), flags);
979
980         spin_lock(&smi_info->count_lock);
981         smi_info->interrupts++;
982         spin_unlock(&smi_info->count_lock);
983
984 #ifdef DEBUG_TIMING
985         do_gettimeofday(&t);
986         printk("**Interrupt: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
987 #endif
988         smi_event_handler(smi_info, 0);
989         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
990         return IRQ_HANDLED;
991 }
992
993 static irqreturn_t si_bt_irq_handler(int irq, void *data)
994 {
995         struct smi_info *smi_info = data;
996         /* We need to clear the IRQ flag for the BT interface. */
997         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
998                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
999                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1000         return si_irq_handler(irq, data);
1001 }
1002
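/* Called by the message handler once the interface is registered:
   claim the IRQ, start the driving timer, and start the kipmid
   thread unless the interface does not need one (BT, or an interrupt
   is available) or the user overrode that with force_kipmid. */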
1003 static int smi_start_processing(void       *send_info,
1004                                 ipmi_smi_t intf)
1005 {
1006         struct smi_info *new_smi = send_info;
1007         int             enable = 0;
1008
1009         new_smi->intf = intf;
1010
1011         /* Try to claim any interrupts. */
1012         if (new_smi->irq_setup)
1013                 new_smi->irq_setup(new_smi);
1014
1015         /* Set up the timer that drives the interface. */
1016         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1017         new_smi->last_timeout_jiffies = jiffies;
1018         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1019
1020         /*
1021          * Check if the user forcefully enabled the daemon.
1022          */
1023         if (new_smi->intf_num < num_force_kipmid)
1024                 enable = force_kipmid[new_smi->intf_num];
1025         /*
1026          * The BT interface is efficient enough to not need a thread,
1027          * and there is no need for a thread if we have interrupts.
1028          */
1029         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1030                 enable = 1;
1031
1032         if (enable) {
1033                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1034                                               "kipmi%d", new_smi->intf_num);
1035                 if (IS_ERR(new_smi->thread)) {
1036                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1037                                " kernel thread due to error %ld, only using"
1038                                " timers to drive the interface\n",
1039                                PTR_ERR(new_smi->thread));
1040                         new_smi->thread = NULL;
1041                 }
1042         }
1043
1044         return 0;
1045 }
1046
1047 static void set_maintenance_mode(void *send_info, int enable)
1048 {
1049         struct smi_info   *smi_info = send_info;
1050
1051         if (!enable)
1052                 atomic_set(&smi_info->req_events, 0);
1053 }
1054
1055 static struct ipmi_smi_handlers handlers =
1056 {
1057         .owner                  = THIS_MODULE,
1058         .start_processing       = smi_start_processing,
1059         .sender                 = sender,
1060         .request_events         = request_events,
1061         .set_maintenance_mode   = set_maintenance_mode,
1062         .set_run_to_completion  = set_run_to_completion,
1063         .poll                   = poll,
1064 };
1065
1066 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1067    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1068
1069 static LIST_HEAD(smi_infos);
1070 static DEFINE_MUTEX(smi_infos_lock);
1071 static int smi_num; /* Used to sequence the SMIs */
1072
1073 #define DEFAULT_REGSPACING      1
1074 #define DEFAULT_REGSIZE         1
1075
1076 static int           si_trydefaults = 1;
1077 static char          *si_type[SI_MAX_PARMS];
1078 #define MAX_SI_TYPE_STR 30
1079 static char          si_type_str[MAX_SI_TYPE_STR];
1080 static unsigned long addrs[SI_MAX_PARMS];
1081 static unsigned int num_addrs;
1082 static unsigned int  ports[SI_MAX_PARMS];
1083 static unsigned int num_ports;
1084 static int           irqs[SI_MAX_PARMS];
1085 static unsigned int num_irqs;
1086 static int           regspacings[SI_MAX_PARMS];
1087 static unsigned int num_regspacings;
1088 static int           regsizes[SI_MAX_PARMS];
1089 static unsigned int num_regsizes;
1090 static int           regshifts[SI_MAX_PARMS];
1091 static unsigned int num_regshifts;
1092 static int slave_addrs[SI_MAX_PARMS];
1093 static unsigned int num_slave_addrs;
1094
1095 #define IPMI_IO_ADDR_SPACE  0
1096 #define IPMI_MEM_ADDR_SPACE 1
1097 static char *addr_space_to_str[] = { "i/o", "mem" };
1098
1099 static int hotmod_handler(const char *val, struct kernel_param *kp);
1100
1101 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1102 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1103                  " Documentation/IPMI.txt in the kernel sources for the"
1104                  " gory details.");
1105
1106 module_param_named(trydefaults, si_trydefaults, bool, 0);
1107 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1108                  " default scan of the KCS and SMIC interface at the standard"
1109                  " address");
1110 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1111 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1112                  " interface separated by commas.  The types are 'kcs',"
1113                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1114                  " the first interface to kcs and the second to bt");
1115 module_param_array(addrs, ulong, &num_addrs, 0);
1116 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1117                  " addresses separated by commas.  Only use if an interface"
1118                  " is in memory.  Otherwise, set it to zero or leave"
1119                  " it blank.");
1120 module_param_array(ports, uint, &num_ports, 0);
1121 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1122                  " addresses separated by commas.  Only use if an interface"
1123                  " is a port.  Otherwise, set it to zero or leave"
1124                  " it blank.");
1125 module_param_array(irqs, int, &num_irqs, 0);
1126 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1127                  " addresses separated by commas.  Only use if an interface"
1128                  " has an interrupt.  Otherwise, set it to zero or leave"
1129                  " it blank.");
1130 module_param_array(regspacings, int, &num_regspacings, 0);
1131 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1132                  " and each successive register used by the interface.  For"
1133                  " instance, if the start address is 0xca2 and the spacing"
1134                  " is 2, then the second address is at 0xca4.  Defaults"
1135                  " to 1.");
1136 module_param_array(regsizes, int, &num_regsizes, 0);
1137 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1138                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1139                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1140                  " the 8-bit IPMI register has to be read from a larger"
1141                  " register.");
1142 module_param_array(regshifts, int, &num_regshifts, 0);
1143 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1144                  " IPMI register, in bits.  For instance, if the data"
1145                  " is read from a 32-bit word and the IPMI data is in"
1146                  " bits 8-15, then the shift would be 8.");
1147 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1148 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1149                  " the controller.  Normally this is 0x20, but can be"
1150                  " overridden by this parm.  This is an array indexed"
1151                  " by interface number.");
1152 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1153 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1154                  " disabled(0).  Normally the IPMI driver auto-detects"
1155                  " this, but the value may be overridden by this parm.");
1156 module_param(unload_when_empty, int, 0);
1157 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1158                  " specified or found, default is 1.  Setting to 0"
1159                  " is useful for hot add of devices using hotmod.");
1160
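/* Example usage of the parameters above (illustrative values only,
   assuming the driver is loaded as the ipmi_si module):

       modprobe ipmi_si type=kcs ports=0xca2 regspacings=1 regsizes=1
*/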
1161
1162 static void std_irq_cleanup(struct smi_info *info)
1163 {
1164         if (info->si_type == SI_BT)
1165                 /* Disable the interrupt in the BT interface. */
1166                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1167         free_irq(info->irq, info);
1168 }
1169
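/* Request the interrupt (BT gets the special handler that also
   clears the IRQ bit in the interface).  On failure, fall back to
   polled operation by zeroing info->irq. */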
1170 static int std_irq_setup(struct smi_info *info)
1171 {
1172         int rv;
1173
1174         if (!info->irq)
1175                 return 0;
1176
1177         if (info->si_type == SI_BT) {
1178                 rv = request_irq(info->irq,
1179                                  si_bt_irq_handler,
1180                                  IRQF_SHARED | IRQF_DISABLED,
1181                                  DEVICE_NAME,
1182                                  info);
1183                 if (!rv)
1184                         /* Enable the interrupt in the BT interface. */
1185                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1186                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1187         } else
1188                 rv = request_irq(info->irq,
1189                                  si_irq_handler,
1190                                  IRQF_SHARED | IRQF_DISABLED,
1191                                  DEVICE_NAME,
1192                                  info);
1193         if (rv) {
1194                 printk(KERN_WARNING
1195                        "ipmi_si: %s unable to claim interrupt %d,"
1196                        " running polled\n",
1197                        DEVICE_NAME, info->irq);
1198                 info->irq = 0;
1199         } else {
1200                 info->irq_cleanup = std_irq_cleanup;
1201                 printk("  Using irq %d\n", info->irq);
1202         }
1203
1204         return rv;
1205 }
1206
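/* Port-mapped I/O accessors for 1-, 2- and 4-byte register sizes.
   The wider variants shift the IPMI byte into position with
   regshift; regspacing gives the distance between registers. */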
1207 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1208 {
1209         unsigned int addr = io->addr_data;
1210
1211         return inb(addr + (offset * io->regspacing));
1212 }
1213
1214 static void port_outb(struct si_sm_io *io, unsigned int offset,
1215                       unsigned char b)
1216 {
1217         unsigned int addr = io->addr_data;
1218
1219         outb(b, addr + (offset * io->regspacing));
1220 }
1221
1222 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1223 {
1224         unsigned int addr = io->addr_data;
1225
1226         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1227 }
1228
1229 static void port_outw(struct si_sm_io *io, unsigned int offset,
1230                       unsigned char b)
1231 {
1232         unsigned int addr = io->addr_data;
1233
1234         outw(b << io->regshift, addr + (offset * io->regspacing));
1235 }
1236
1237 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1238 {
1239         unsigned int addr = io->addr_data;
1240
1241         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1242 }
1243
1244 static void port_outl(struct si_sm_io *io, unsigned int offset,
1245                       unsigned char b)
1246 {
1247         unsigned int addr = io->addr_data;
1248
1249         outl(b << io->regshift, addr+(offset * io->regspacing));
1250 }
1251
1252 static void port_cleanup(struct smi_info *info)
1253 {
1254         unsigned int addr = info->io.addr_data;
1255         int          idx;
1256
1257         if (addr) {
1258                 for (idx = 0; idx < info->io_size; idx++) {
1259                         release_region(addr + idx * info->io.regspacing,
1260                                        info->io.regsize);
1261                 }
1262         }
1263 }
1264
1265 static int port_setup(struct smi_info *info)
1266 {
1267         unsigned int addr = info->io.addr_data;
1268         int          idx;
1269
1270         if (!addr)
1271                 return -ENODEV;
1272
1273         info->io_cleanup = port_cleanup;
1274
1275         /* Figure out the actual inb/inw/inl/etc routine to use based
1276            upon the register size. */
1277         switch (info->io.regsize) {
1278         case 1:
1279                 info->io.inputb = port_inb;
1280                 info->io.outputb = port_outb;
1281                 break;
1282         case 2:
1283                 info->io.inputb = port_inw;
1284                 info->io.outputb = port_outw;
1285                 break;
1286         case 4:
1287                 info->io.inputb = port_inl;
1288                 info->io.outputb = port_outl;
1289                 break;
1290         default:
1291                 printk("ipmi_si: Invalid register size: %d\n",
1292                        info->io.regsize);
1293                 return -EINVAL;
1294         }
1295
1296         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1297          * tables.  This causes problems when trying to register the
1298          * entire I/O region.  Therefore we must register each I/O
1299          * port separately.
1300          */
1301         for (idx = 0; idx < info->io_size; idx++) {
1302                 if (request_region(addr + idx * info->io.regspacing,
1303                                    info->io.regsize, DEVICE_NAME) == NULL) {
1304                         /* Undo allocations */
1305                         while (idx--) {
1306                                 release_region(addr + idx * info->io.regspacing,
1307                                                info->io.regsize);
1308                         }
1309                         return -EIO;
1310                 }
1311         }
1312         return 0;
1313 }
1314
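/* Memory-mapped I/O accessors, the equivalents of the port versions
   above (readb/writeb up through readq/writeq where available). */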
1315 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1316 {
1317         return readb((io->addr)+(offset * io->regspacing));
1318 }
1319
1320 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1321                      unsigned char b)
1322 {
1323         writeb(b, (io->addr)+(offset * io->regspacing));
1324 }
1325
1326 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1327 {
1328         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1329                 & 0xff;
1330 }
1331
1332 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1333                      unsigned char b)
1334 {
1335         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1336 }
1337
1338 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1339 {
1340         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1341                 & 0xff;
1342 }
1343
1344 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1345                      unsigned char b)
1346 {
1347         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1348 }
1349
1350 #ifdef readq
1351 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1352 {
1353         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1354                 & 0xff;
1355 }
1356
1357 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1358                      unsigned char b)
1359 {
1360         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1361 }
1362 #endif
1363
1364 static void mem_cleanup(struct smi_info *info)
1365 {
1366         unsigned long addr = info->io.addr_data;
1367         int           mapsize;
1368
1369         if (info->io.addr) {
1370                 iounmap(info->io.addr);
1371
1372                 mapsize = ((info->io_size * info->io.regspacing)
1373                            - (info->io.regspacing - info->io.regsize));
1374
1375                 release_mem_region(addr, mapsize);
1376         }
1377 }
1378
1379 static int mem_setup(struct smi_info *info)
1380 {
1381         unsigned long addr = info->io.addr_data;
1382         int           mapsize;
1383
1384         if (!addr)
1385                 return -ENODEV;
1386
1387         info->io_cleanup = mem_cleanup;
1388
1389         /* Figure out the actual readb/readw/readl/etc routine to use based
1390            upon the register size. */
1391         switch (info->io.regsize) {
1392         case 1:
1393                 info->io.inputb = intf_mem_inb;
1394                 info->io.outputb = intf_mem_outb;
1395                 break;
1396         case 2:
1397                 info->io.inputb = intf_mem_inw;
1398                 info->io.outputb = intf_mem_outw;
1399                 break;
1400         case 4:
1401                 info->io.inputb = intf_mem_inl;
1402                 info->io.outputb = intf_mem_outl;
1403                 break;
1404 #ifdef readq
1405         case 8:
1406                 info->io.inputb = mem_inq;
1407                 info->io.outputb = mem_outq;
1408                 break;
1409 #endif
1410         default:
1411                 printk("ipmi_si: Invalid register size: %d\n",
1412                        info->io.regsize);
1413                 return -EINVAL;
1414         }
1415
1416         /* Calculate the total amount of memory to claim.  This is an
1417          * unusual looking calculation, but it avoids claiming any
1418          * more memory than it has to.  It will claim everything
1419          * between the first address to the end of the last full
1420          * register. */
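        /*
         * For example (illustrative numbers only): io_size = 2,
         * regspacing = 4 and regsize = 1 give
         * mapsize = 2*4 - (4-1) = 5, covering offsets 0..4, i.e. up to
         * the end of the last register but not the padding after it.
         */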
1421         mapsize = ((info->io_size * info->io.regspacing)
1422                    - (info->io.regspacing - info->io.regsize));
1423
1424         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1425                 return -EIO;
1426
1427         info->io.addr = ioremap(addr, mapsize);
1428         if (info->io.addr == NULL) {
1429                 release_mem_region(addr, mapsize);
1430                 return -EIO;
1431         }
1432         return 0;
1433 }
1434
1435 /*
1436  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1437  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1438  * Options are:
1439  *   rsp=<regspacing>
1440  *   rsi=<regsize>
1441  *   rsh=<regshift>
1442  *   irq=<irq>
1443  *   ipmb=<ipmb addr>
1444  */
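/*
 * For example (illustrative values; the string is normally written to
 * /sys/module/ipmi_si/parameters/hotmod):
 *   add,kcs,i/o,0xca2,rsp=1,irq=0
 *   remove,kcs,i/o,0xca2
 */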
1445 enum hotmod_op { HM_ADD, HM_REMOVE };
1446 struct hotmod_vals {
1447         char *name;
1448         int  val;
1449 };
1450 static struct hotmod_vals hotmod_ops[] = {
1451         { "add",        HM_ADD },
1452         { "remove",     HM_REMOVE },
1453         { NULL }
1454 };
1455 static struct hotmod_vals hotmod_si[] = {
1456         { "kcs",        SI_KCS },
1457         { "smic",       SI_SMIC },
1458         { "bt",         SI_BT },
1459         { NULL }
1460 };
1461 static struct hotmod_vals hotmod_as[] = {
1462         { "mem",        IPMI_MEM_ADDR_SPACE },
1463         { "i/o",        IPMI_IO_ADDR_SPACE },
1464         { NULL }
1465 };
1466
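/*
 * Pull the next comma-terminated token off of *curr and translate it
 * with the given name/value table; on success *val is set and *curr
 * is advanced past the token.
 */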
1467 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1468 {
1469         char *s;
1470         int  i;
1471
1472         s = strchr(*curr, ',');
1473         if (!s) {
1474                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1475                 return -EINVAL;
1476         }
1477         *s = '\0';
1478         s++;
1479                 for (i = 0; v[i].name; i++) {
1480                 if (strcmp(*curr, v[i].name) == 0) {
1481                         *val = v[i].val;
1482                         *curr = s;
1483                         return 0;
1484                 }
1485         }
1486
1487         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1488         return -EINVAL;
1489 }
1490
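/*
 * Returns 1 if "curr" named this option and "option" parsed cleanly
 * into *val, 0 if "curr" is some other option, or a negative errno if
 * the value was missing or malformed.
 */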
1491 static int check_hotmod_int_op(const char *curr, const char *option,
1492                                const char *name, int *val)
1493 {
1494         char *n;
1495
1496         if (strcmp(curr, name) == 0) {
1497                 if (!option) {
1498                         printk(KERN_WARNING PFX
1499                                "No option given for '%s'\n",
1500                                curr);
1501                         return -EINVAL;
1502                 }
1503                 *val = simple_strtoul(option, &n, 0);
1504                 if ((*n != '\0') || (*option == '\0')) {
1505                         printk(KERN_WARNING PFX
1506                                "Bad option given for '%s'\n",
1507                                curr);
1508                         return -EINVAL;
1509                 }
1510                 return 1;
1511         }
1512         return 0;
1513 }
1514
1515 static int hotmod_handler(const char *val, struct kernel_param *kp)
1516 {
1517         char *str = kstrdup(val, GFP_KERNEL);
1518         int  rv;
1519         char *next, *curr, *s, *n, *o;
1520         enum hotmod_op op;
1521         enum si_type si_type;
1522         int  addr_space;
1523         unsigned long addr;
1524         int regspacing;
1525         int regsize;
1526         int regshift;
1527         int irq;
1528         int ipmb;
1529         int ival;
1530         int len;
1531         struct smi_info *info;
1532
1533         if (!str)
1534                 return -ENOMEM;
1535
1536         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1537         len = strlen(str);
1538         ival = len - 1;
1539         while ((ival >= 0) && isspace(str[ival])) {
1540                 str[ival] = '\0';
1541                 ival--;
1542         }
1543
1544         for (curr = str; curr; curr = next) {
1545                 regspacing = 1;
1546                 regsize = 1;
1547                 regshift = 0;
1548                 irq = 0;
1549                 ipmb = 0x20;
1550
1551                 next = strchr(curr, ':');
1552                 if (next) {
1553                         *next = '\0';
1554                         next++;
1555                 }
1556
1557                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1558                 if (rv)
1559                         break;
1560                 op = ival;
1561
1562                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1563                 if (rv)
1564                         break;
1565                 si_type = ival;
1566
1567                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1568                 if (rv)
1569                         break;
1570
1571                 s = strchr(curr, ',');
1572                 if (s) {
1573                         *s = '\0';
1574                         s++;
1575                 }
1576                 addr = simple_strtoul(curr, &n, 0);
1577                 if ((*n != '\0') || (*curr == '\0')) {
1578                         printk(KERN_WARNING PFX "Invalid hotmod address"
1579                                " '%s'\n", curr);
1580                         break;
1581                 }
1582
1583                 while (s) {
1584                         curr = s;
1585                         s = strchr(curr, ',');
1586                         if (s) {
1587                                 *s = '\0';
1588                                 s++;
1589                         }
1590                         o = strchr(curr, '=');
1591                         if (o) {
1592                                 *o = '\0';
1593                                 o++;
1594                         }
1595                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1596                         if (rv < 0)
1597                                 goto out;
1598                         else if (rv)
1599                                 continue;
1600                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1601                         if (rv < 0)
1602                                 goto out;
1603                         else if (rv)
1604                                 continue;
1605                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1606                         if (rv < 0)
1607                                 goto out;
1608                         else if (rv)
1609                                 continue;
1610                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1611                         if (rv < 0)
1612                                 goto out;
1613                         else if (rv)
1614                                 continue;
1615                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1616                         if (rv < 0)
1617                                 goto out;
1618                         else if (rv)
1619                                 continue;
1620
1621                         rv = -EINVAL;
1622                         printk(KERN_WARNING PFX
1623                                "Invalid hotmod option '%s'\n",
1624                                curr);
1625                         goto out;
1626                 }
1627
1628                 if (op == HM_ADD) {
1629                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1630                         if (!info) {
1631                                 rv = -ENOMEM;
1632                                 goto out;
1633                         }
1634
1635                         info->addr_source = "hotmod";
1636                         info->si_type = si_type;
1637                         info->io.addr_data = addr;
1638                         info->io.addr_type = addr_space;
1639                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1640                                 info->io_setup = mem_setup;
1641                         else
1642                                 info->io_setup = port_setup;
1643
1644                         info->io.addr = NULL;
1645                         info->io.regspacing = regspacing;
1646                         if (!info->io.regspacing)
1647                                 info->io.regspacing = DEFAULT_REGSPACING;
1648                         info->io.regsize = regsize;
1649                         if (!info->io.regsize)
1650                                 info->io.regsize = DEFAULT_REGSPACING;
1651                         info->io.regshift = regshift;
1652                         info->irq = irq;
1653                         if (info->irq)
1654                                 info->irq_setup = std_irq_setup;
1655                         info->slave_addr = ipmb;
1656
1657                         try_smi_init(info);
1658                 } else {
1659                         /* remove */
1660                         struct smi_info *e, *tmp_e;
1661
1662                         mutex_lock(&smi_infos_lock);
1663                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1664                                 if (e->io.addr_type != addr_space)
1665                                         continue;
1666                                 if (e->si_type != si_type)
1667                                         continue;
1668                                 if (e->io.addr_data == addr)
1669                                         cleanup_one_si(e);
1670                         }
1671                         mutex_unlock(&smi_infos_lock);
1672                 }
1673         }
1674         rv = len;
1675  out:
1676         kfree(str);
1677         return rv;
1678 }
1679
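/*
 * Register any interfaces described by the hard-coded module
 * parameters (the per-index type/ports/addrs/regspacings/regsizes/
 * regshifts/irqs arrays).
 */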
1680 static __devinit void hardcode_find_bmc(void)
1681 {
1682         int             i;
1683         struct smi_info *info;
1684
1685         for (i = 0; i < SI_MAX_PARMS; i++) {
1686                 if (!ports[i] && !addrs[i])
1687                         continue;
1688
1689                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1690                 if (!info)
1691                         return;
1692
1693                 info->addr_source = "hardcoded";
1694
1695                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1696                         info->si_type = SI_KCS;
1697                 } else if (strcmp(si_type[i], "smic") == 0) {
1698                         info->si_type = SI_SMIC;
1699                 } else if (strcmp(si_type[i], "bt") == 0) {
1700                         info->si_type = SI_BT;
1701                 } else {
1702                         printk(KERN_WARNING
1703                                "ipmi_si: Interface type specified "
1704                                "for interface %d, was invalid: %s\n",
1705                                i, si_type[i]);
1706                         kfree(info);
1707                         continue;
1708                 }
1709
1710                 if (ports[i]) {
1711                         /* An I/O port */
1712                         info->io_setup = port_setup;
1713                         info->io.addr_data = ports[i];
1714                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1715                 } else if (addrs[i]) {
1716                         /* A memory port */
1717                         info->io_setup = mem_setup;
1718                         info->io.addr_data = addrs[i];
1719                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1720                 } else {
1721                         printk(KERN_WARNING
1722                                "ipmi_si: Interface type specified "
1723                                "for interface %d, "
1724                                "but port and address were not set or "
1725                                "set to zero.\n", i);
1726                         kfree(info);
1727                         continue;
1728                 }
1729
1730                 info->io.addr = NULL;
1731                 info->io.regspacing = regspacings[i];
1732                 if (!info->io.regspacing)
1733                         info->io.regspacing = DEFAULT_REGSPACING;
1734                 info->io.regsize = regsizes[i];
1735                 if (!info->io.regsize)
1736                         info->io.regsize = DEFAULT_REGSPACING;
1737                 info->io.regshift = regshifts[i];
1738                 info->irq = irqs[i];
1739                 if (info->irq)
1740                         info->irq_setup = std_irq_setup;
1741
1742                 try_smi_init(info);
1743         }
1744 }
1745
1746 #ifdef CONFIG_ACPI
1747
1748 #include <linux/acpi.h>
1749
1750 /* Once we get an ACPI failure, we don't try any more, because we go
1751    through the tables sequentially.  Once we don't find a table, there
1752    are no more. */
1753 static int acpi_failure;
1754
1755 /* For GPE-type interrupts. */
1756 static u32 ipmi_acpi_gpe(void *context)
1757 {
1758         struct smi_info *smi_info = context;
1759         unsigned long   flags;
1760 #ifdef DEBUG_TIMING
1761         struct timeval t;
1762 #endif
1763
1764         spin_lock_irqsave(&(smi_info->si_lock), flags);
1765
1766         spin_lock(&smi_info->count_lock);
1767         smi_info->interrupts++;
1768         spin_unlock(&smi_info->count_lock);
1769
1770 #ifdef DEBUG_TIMING
1771         do_gettimeofday(&t);
1772         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1773 #endif
1774         smi_event_handler(smi_info, 0);
1775         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1776
1777         return ACPI_INTERRUPT_HANDLED;
1778 }
1779
1780 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1781 {
1782         if (!info->irq)
1783                 return;
1784
1785         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1786 }
1787
1788 static int acpi_gpe_irq_setup(struct smi_info *info)
1789 {
1790         acpi_status status;
1791
1792         if (!info->irq)
1793                 return 0;
1794
1795         /* FIXME - is level triggered right? */
1796         status = acpi_install_gpe_handler(NULL,
1797                                           info->irq,
1798                                           ACPI_GPE_LEVEL_TRIGGERED,
1799                                           &ipmi_acpi_gpe,
1800                                           info);
1801         if (status != AE_OK) {
1802                 printk(KERN_WARNING
1803                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1804                        " running polled\n",
1805                        DEVICE_NAME, info->irq);
1806                 info->irq = 0;
1807                 return -EINVAL;
1808         } else {
1809                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1810                 printk("  Using ACPI GPE %d\n", info->irq);
1811                 return 0;
1812         }
1813 }
1814
1815 /*
1816  * Defined at
1817  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1818  */
1819 struct SPMITable {
1820         s8      Signature[4];
1821         u32     Length;
1822         u8      Revision;
1823         u8      Checksum;
1824         s8      OEMID[6];
1825         s8      OEMTableID[8];
1826         s8      OEMRevision[4];
1827         s8      CreatorID[4];
1828         s8      CreatorRevision[4];
1829         u8      InterfaceType;
1830         u8      IPMIlegacy;
1831         s16     SpecificationRevision;
1832
1833         /*
1834          * Bit 0 - SCI interrupt supported
1835          * Bit 1 - I/O APIC/SAPIC
1836          */
1837         u8      InterruptType;
1838
1839         /* If bit 0 of InterruptType is set, then this is the SCI
1840            interrupt in the GPEx_STS register. */
1841         u8      GPE;
1842
1843         s16     Reserved;
1844
1845         /* If bit 1 of InterruptType is set, then this is the I/O
1846            APIC/SAPIC interrupt. */
1847         u32     GlobalSystemInterrupt;
1848
1849         /* The actual register address. */
1850         struct acpi_generic_address addr;
1851
1852         u8      UID[4];
1853
1854         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1855 };
1856
1857 static __devinit int try_init_acpi(struct SPMITable *spmi)
1858 {
1859         struct smi_info  *info;
1860         u8               addr_space;
1861
1862         if (spmi->IPMIlegacy != 1) {
1863                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1864                 return -ENODEV;
1865         }
1866
1867         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1868                 addr_space = IPMI_MEM_ADDR_SPACE;
1869         else
1870                 addr_space = IPMI_IO_ADDR_SPACE;
1871
1872         info = kzalloc(sizeof(*info), GFP_KERNEL);
1873         if (!info) {
1874                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1875                 return -ENOMEM;
1876         }
1877
1878         info->addr_source = "ACPI";
1879
1880         /* Figure out the interface type. */
1881         switch (spmi->InterfaceType)
1882         {
1883         case 1: /* KCS */
1884                 info->si_type = SI_KCS;
1885                 break;
1886         case 2: /* SMIC */
1887                 info->si_type = SI_SMIC;
1888                 break;
1889         case 3: /* BT */
1890                 info->si_type = SI_BT;
1891                 break;
1892         default:
1893                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1894                         spmi->InterfaceType);
1895                 kfree(info);
1896                 return -EIO;
1897         }
1898
1899         if (spmi->InterruptType & 1) {
1900                 /* We've got a GPE interrupt. */
1901                 info->irq = spmi->GPE;
1902                 info->irq_setup = acpi_gpe_irq_setup;
1903         } else if (spmi->InterruptType & 2) {
1904                 /* We've got an APIC/SAPIC interrupt. */
1905                 info->irq = spmi->GlobalSystemInterrupt;
1906                 info->irq_setup = std_irq_setup;
1907         } else {
1908                 /* Use the default interrupt setting. */
1909                 info->irq = 0;
1910                 info->irq_setup = NULL;
1911         }
1912
1913         if (spmi->addr.bit_width) {
1914                 /* A (hopefully) properly formed register bit width. */
1915                 info->io.regspacing = spmi->addr.bit_width / 8;
1916         } else {
1917                 info->io.regspacing = DEFAULT_REGSPACING;
1918         }
1919         info->io.regsize = info->io.regspacing;
1920         info->io.regshift = spmi->addr.bit_offset;
1921
1922         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1923                 info->io_setup = mem_setup;
1924                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1925         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1926                 info->io_setup = port_setup;
1927                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1928         } else {
1929                 kfree(info);
1930                 printk(KERN_INFO "ipmi_si: Unknown ACPI I/O Address type\n");
1931                 return -EIO;
1932         }
1933         info->io.addr_data = spmi->addr.address;
1934
1935         try_smi_init(info);
1936
1937         return 0;
1938 }
1939
1940 static __devinit void acpi_find_bmc(void)
1941 {
1942         acpi_status      status;
1943         struct SPMITable *spmi;
1944         int              i;
1945
1946         if (acpi_disabled)
1947                 return;
1948
1949         if (acpi_failure)
1950                 return;
1951
1952         for (i = 0; ; i++) {
1953                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1954                                         (struct acpi_table_header **)&spmi);
1955                 if (status != AE_OK)
1956                         return;
1957
1958                 try_init_acpi(spmi);
1959         }
1960 }
1961 #endif
1962
1963 #ifdef CONFIG_DMI
1964 struct dmi_ipmi_data
1965 {
1966         u8              type;
1967         u8              addr_space;
1968         unsigned long   base_addr;
1969         u8              irq;
1970         u8              offset;
1971         u8              slave_addr;
1972 };
1973
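/*
 * Decode an SMBIOS "IPMI Device Information" (type 38) record:
 * data[4] is the interface type, data[6] the BMC slave address,
 * the base address starts at data[8], data[0x10] is the base
 * address modifier (address space, LSB, register spacing) and
 * data[0x11] is the interrupt number.
 */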
1974 static int __devinit decode_dmi(const struct dmi_header *dm,
1975                                 struct dmi_ipmi_data *dmi)
1976 {
1977         const u8        *data = (const u8 *)dm;
1978         unsigned long   base_addr;
1979         u8              reg_spacing;
1980         u8              len = dm->length;
1981
1982         dmi->type = data[4];
1983
1984         memcpy(&base_addr, data+8, sizeof(unsigned long));
1985         if (len >= 0x11) {
1986                 if (base_addr & 1) {
1987                         /* I/O */
1988                         base_addr &= 0xFFFE;
1989                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1990                 }
1991                 else {
1992                         /* Memory */
1993                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1994                 }
1995                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1996                    is odd. */
1997                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1998
1999                 dmi->irq = data[0x11];
2000
2001                 /* The top two bits of byte 0x10 hold the register spacing. */
2002                 reg_spacing = (data[0x10] & 0xC0) >> 6;
2003                 switch (reg_spacing) {
2004                 case 0x00: /* Byte boundaries */
2005                     dmi->offset = 1;
2006                     break;
2007                 case 0x01: /* 32-bit boundaries */
2008                     dmi->offset = 4;
2009                     break;
2010                 case 0x02: /* 16-byte boundaries */
2011                     dmi->offset = 16;
2012                     break;
2013                 default:
2014                     /* Some other interface, just ignore it. */
2015                     return -EIO;
2016                 }
2017         } else {
2018                 /* Old DMI spec. */
2019                 /* Note that technically, the lower bit of the base
2020                  * address should be 1 if the address is I/O and 0 if
2021                  * the address is in memory.  So many systems get that
2022                  * wrong (and all that I have seen are I/O) so we just
2023                  * ignore that bit and assume I/O.  Systems that use
2024                  * memory should use the newer spec, anyway. */
2025                 dmi->base_addr = base_addr & 0xfffe;
2026                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2027                 dmi->offset = 1;
2028         }
2029
2030         dmi->slave_addr = data[6];
2031
2032         return 0;
2033 }
2034
2035 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2036 {
2037         struct smi_info *info;
2038
2039         info = kzalloc(sizeof(*info), GFP_KERNEL);
2040         if (!info) {
2041                 printk(KERN_ERR
2042                        "ipmi_si: Could not allocate SI data\n");
2043                 return;
2044         }
2045
2046         info->addr_source = "SMBIOS";
2047
2048         switch (ipmi_data->type) {
2049         case 0x01: /* KCS */
2050                 info->si_type = SI_KCS;
2051                 break;
2052         case 0x02: /* SMIC */
2053                 info->si_type = SI_SMIC;
2054                 break;
2055         case 0x03: /* BT */
2056                 info->si_type = SI_BT;
2057                 break;
2058         default:
2059                 kfree(info);
2060                 return;
2061         }
2062
2063         switch (ipmi_data->addr_space) {
2064         case IPMI_MEM_ADDR_SPACE:
2065                 info->io_setup = mem_setup;
2066                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2067                 break;
2068
2069         case IPMI_IO_ADDR_SPACE:
2070                 info->io_setup = port_setup;
2071                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2072                 break;
2073
2074         default:
2075                 kfree(info);
2076                 printk(KERN_WARNING
2077                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2078                        ipmi_data->addr_space);
2079                 return;
2080         }
2081         info->io.addr_data = ipmi_data->base_addr;
2082
2083         info->io.regspacing = ipmi_data->offset;
2084         if (!info->io.regspacing)
2085                 info->io.regspacing = DEFAULT_REGSPACING;
2086         info->io.regsize = DEFAULT_REGSPACING;
2087         info->io.regshift = 0;
2088
2089         info->slave_addr = ipmi_data->slave_addr;
2090
2091         info->irq = ipmi_data->irq;
2092         if (info->irq)
2093                 info->irq_setup = std_irq_setup;
2094
2095         try_smi_init(info);
2096 }
2097
2098 static void __devinit dmi_find_bmc(void)
2099 {
2100         const struct dmi_device *dev = NULL;
2101         struct dmi_ipmi_data data;
2102         int                  rv;
2103
2104         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2105                 memset(&data, 0, sizeof(data));
2106                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2107                                 &data);
2108                 if (!rv)
2109                         try_init_dmi(&data);
2110         }
2111 }
2112 #endif /* CONFIG_DMI */
2113
2114 #ifdef CONFIG_PCI
2115
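/*
 * PCI class code 0x0c07xx marks an IPMI system interface; the
 * programming-interface byte selects SMIC (0), KCS (1) or BT (2).
 */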
2116 #define PCI_ERMC_CLASSCODE              0x0C0700
2117 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2118 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2119 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2120 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2121 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2122
2123 #define PCI_HP_VENDOR_ID    0x103C
2124 #define PCI_MMC_DEVICE_ID   0x121A
2125 #define PCI_MMC_ADDR_CW     0x10
2126
2127 static void ipmi_pci_cleanup(struct smi_info *info)
2128 {
2129         struct pci_dev *pdev = info->addr_source_data;
2130
2131         pci_disable_device(pdev);
2132 }
2133
2134 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2135                                     const struct pci_device_id *ent)
2136 {
2137         int rv;
2138         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2139         struct smi_info *info;
2140         int first_reg_offset = 0;
2141
2142         info = kzalloc(sizeof(*info), GFP_KERNEL);
2143         if (!info)
2144                 return -ENOMEM;
2145
2146         info->addr_source = "PCI";
2147
2148         switch (class_type) {
2149         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2150                 info->si_type = SI_SMIC;
2151                 break;
2152
2153         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2154                 info->si_type = SI_KCS;
2155                 break;
2156
2157         case PCI_ERMC_CLASSCODE_TYPE_BT:
2158                 info->si_type = SI_BT;
2159                 break;
2160
2161         default:
2162                 kfree(info);
2163                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2164                        pci_name(pdev), class_type);
2165                 return -ENOMEM;
2166         }
2167
2168         rv = pci_enable_device(pdev);
2169         if (rv) {
2170                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2171                        pci_name(pdev));
2172                 kfree(info);
2173                 return rv;
2174         }
2175
2176         info->addr_source_cleanup = ipmi_pci_cleanup;
2177         info->addr_source_data = pdev;
2178
2179         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2180                 first_reg_offset = 1;
2181
2182         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2183                 info->io_setup = port_setup;
2184                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2185         } else {
2186                 info->io_setup = mem_setup;
2187                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2188         }
2189         info->io.addr_data = pci_resource_start(pdev, 0);
2190
2191         info->io.regspacing = DEFAULT_REGSPACING;
2192         info->io.regsize = DEFAULT_REGSPACING;
2193         info->io.regshift = 0;
2194
2195         info->irq = pdev->irq;
2196         if (info->irq)
2197                 info->irq_setup = std_irq_setup;
2198
2199         info->dev = &pdev->dev;
2200         pci_set_drvdata(pdev, info);
2201
2202         return try_smi_init(info);
2203 }
2204
2205 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2206 {
2207         struct smi_info *info = pci_get_drvdata(pdev);
2208         cleanup_one_si(info);
2209 }
2210
2211 #ifdef CONFIG_PM
2212 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2213 {
2214         return 0;
2215 }
2216
2217 static int ipmi_pci_resume(struct pci_dev *pdev)
2218 {
2219         return 0;
2220 }
2221 #endif
2222
2223 static struct pci_device_id ipmi_pci_devices[] = {
2224         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2225         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2226         { 0, }
2227 };
2228 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2229
2230 static struct pci_driver ipmi_pci_driver = {
2231         .name =         DEVICE_NAME,
2232         .id_table =     ipmi_pci_devices,
2233         .probe =        ipmi_pci_probe,
2234         .remove =       __devexit_p(ipmi_pci_remove),
2235 #ifdef CONFIG_PM
2236         .suspend =      ipmi_pci_suspend,
2237         .resume =       ipmi_pci_resume,
2238 #endif
2239 };
2240 #endif /* CONFIG_PCI */
2241
2242
2243 #ifdef CONFIG_PPC_OF
2244 static int __devinit ipmi_of_probe(struct of_device *dev,
2245                          const struct of_device_id *match)
2246 {
2247         struct smi_info *info;
2248         struct resource resource;
2249         const int *regsize, *regspacing, *regshift;
2250         struct device_node *np = dev->node;
2251         int ret;
2252         int proplen;
2253
2254         dev_info(&dev->dev, PFX "probing via device tree\n");
2255
2256         ret = of_address_to_resource(np, 0, &resource);
2257         if (ret) {
2258                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2259                 return ret;
2260         }
2261
2262         regsize = of_get_property(np, "reg-size", &proplen);
2263         if (regsize && proplen != 4) {
2264                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2265                 return -EINVAL;
2266         }
2267
2268         regspacing = of_get_property(np, "reg-spacing", &proplen);
2269         if (regspacing && proplen != 4) {
2270                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2271                 return -EINVAL;
2272         }
2273
2274         regshift = of_get_property(np, "reg-shift", &proplen);
2275         if (regshift && proplen != 4) {
2276                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2277                 return -EINVAL;
2278         }
2279
2280         info = kzalloc(sizeof(*info), GFP_KERNEL);
2281
2282         if (!info) {
2283                 dev_err(&dev->dev,
2284                         PFX "could not allocate memory for OF probe\n");
2285                 return -ENOMEM;
2286         }
2287
2288         info->si_type           = (enum si_type) match->data;
2289         info->addr_source       = "device-tree";
2290         info->io_setup          = mem_setup;
2291         info->irq_setup         = std_irq_setup;
2292
2293         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2294         info->io.addr_data      = resource.start;
2295
2296         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2297         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2298         info->io.regshift       = regshift ? *regshift : 0;
2299
2300         info->irq               = irq_of_parse_and_map(dev->node, 0);
2301         info->dev               = &dev->dev;
2302
2303         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2304                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2305                 info->irq);
2306
2307         dev->dev.driver_data = (void*) info;
2308
2309         return try_smi_init(info);
2310 }
2311
2312 static int __devexit ipmi_of_remove(struct of_device *dev)
2313 {
2314         cleanup_one_si(dev->dev.driver_data);
2315         return 0;
2316 }
2317
2318 static struct of_device_id ipmi_match[] =
2319 {
2320         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2321         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2322         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2323         {},
2324 };
2325
2326 static struct of_platform_driver ipmi_of_platform_driver =
2327 {
2328         .name           = "ipmi",
2329         .match_table    = ipmi_match,
2330         .probe          = ipmi_of_probe,
2331         .remove         = __devexit_p(ipmi_of_remove),
2332 };
2333 #endif /* CONFIG_PPC_OF */
2334
2335
2336 static int try_get_dev_id(struct smi_info *smi_info)
2337 {
2338         unsigned char         msg[2];
2339         unsigned char         *resp;
2340         unsigned long         resp_len;
2341         enum si_sm_result     smi_result;
2342         int                   rv = 0;
2343
2344         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2345         if (!resp)
2346                 return -ENOMEM;
2347
2348         /* Do a Get Device ID command, since it comes back with some
2349            useful info. */
2350         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2351         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2352         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2353
2354         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
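        /* Drive the state machine by hand until the transaction
           completes; neither the timer nor the interrupt is set up
           at this point. */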
2355         for (;;)
2356         {
2357                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2358                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2359                         schedule_timeout_uninterruptible(1);
2360                         smi_result = smi_info->handlers->event(
2361                                 smi_info->si_sm, 100);
2362                 }
2363                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2364                 {
2365                         smi_result = smi_info->handlers->event(
2366                                 smi_info->si_sm, 0);
2367                 }
2368                 else
2369                         break;
2370         }
2371         if (smi_result == SI_SM_HOSED) {
2372                 /* We couldn't get the state machine to run, so whatever's at
2373                    the port is probably not an IPMI SMI interface. */
2374                 rv = -ENODEV;
2375                 goto out;
2376         }
2377
2378         /* Otherwise, we got some data. */
2379         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2380                                                   resp, IPMI_MAX_MSG_LENGTH);
2381
2382         /* Check and record info from the get device id, in case we need it. */
2383         rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2384
2385  out:
2386         kfree(resp);
2387         return rv;
2388 }
2389
2390 static int type_file_read_proc(char *page, char **start, off_t off,
2391                                int count, int *eof, void *data)
2392 {
2393         struct smi_info *smi = data;
2394
2395         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2396 }
2397
2398 static int stat_file_read_proc(char *page, char **start, off_t off,
2399                                int count, int *eof, void *data)
2400 {
2401         char            *out = (char *) page;
2402         struct smi_info *smi = data;
2403
2404         out += sprintf(out, "interrupts_enabled:    %d\n",
2405                        smi->irq && !smi->interrupt_disabled);
2406         out += sprintf(out, "short_timeouts:        %ld\n",
2407                        smi->short_timeouts);
2408         out += sprintf(out, "long_timeouts:         %ld\n",
2409                        smi->long_timeouts);
2410         out += sprintf(out, "timeout_restarts:      %ld\n",
2411                        smi->timeout_restarts);
2412         out += sprintf(out, "idles:                 %ld\n",
2413                        smi->idles);
2414         out += sprintf(out, "interrupts:            %ld\n",
2415                        smi->interrupts);
2416         out += sprintf(out, "attentions:            %ld\n",
2417                        smi->attentions);
2418         out += sprintf(out, "flag_fetches:          %ld\n",
2419                        smi->flag_fetches);
2420         out += sprintf(out, "hosed_count:           %ld\n",
2421                        smi->hosed_count);
2422         out += sprintf(out, "complete_transactions: %ld\n",
2423                        smi->complete_transactions);
2424         out += sprintf(out, "events:                %ld\n",
2425                        smi->events);
2426         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2427                        smi->watchdog_pretimeouts);
2428         out += sprintf(out, "incoming_messages:     %ld\n",
2429                        smi->incoming_messages);
2430
2431         return out - page;
2432 }
2433
2434 static int param_read_proc(char *page, char **start, off_t off,
2435                            int count, int *eof, void *data)
2436 {
2437         struct smi_info *smi = data;
2438
2439         return sprintf(page,
2440                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2441                        si_to_str[smi->si_type],
2442                        addr_space_to_str[smi->io.addr_type],
2443                        smi->io.addr_data,
2444                        smi->io.regspacing,
2445                        smi->io.regsize,
2446                        smi->io.regshift,
2447                        smi->irq,
2448                        smi->slave_addr);
2449 }
2450
2451 /*
2452  * oem_data_avail_to_receive_msg_avail
2453  * @info - smi_info structure with msg_flags set
2454  *
2455  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2456  * Returns 1 indicating need to re-run handle_flags().
2457  */
2458 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2459 {
2460         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2461                                 RECEIVE_MSG_AVAIL);
2462         return 1;
2463 }
2464
2465 /*
2466  * setup_dell_poweredge_oem_data_handler
2467  * @info - smi_info.device_id must be populated
2468  *
2469  * Systems that match, but have firmware version < 1.40 may assert
2470  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2471  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2472  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2473  * as RECEIVE_MSG_AVAIL instead.
2474  *
2475  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2476  * assert the OEM[012] bits, and if it did, the driver would have to
2477  * change to handle that properly, we don't actually check for the
2478  * firmware version.
2479  * Device ID = 0x20                BMC on PowerEdge 8G servers
2480  * Device Revision = 0x80
2481  * Firmware Revision1 = 0x01       BMC version 1.40
2482  * Firmware Revision2 = 0x40       BCD encoded
2483  * IPMI Version = 0x51             IPMI 1.5
2484  * Manufacturer ID = A2 02 00      Dell IANA
2485  *
2486  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2487  * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
2488  *
2489  */
2490 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2491 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2492 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2493 #define DELL_IANA_MFR_ID 0x0002a2
2494 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2495 {
2496         struct ipmi_device_id *id = &smi_info->device_id;
2497         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2498                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2499                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2500                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2501                         smi_info->oem_data_avail_handler =
2502                                 oem_data_avail_to_receive_msg_avail;
2503                 }
2504                 else if (ipmi_version_major(id) < 1 ||
2505                          (ipmi_version_major(id) == 1 &&
2506                           ipmi_version_minor(id) < 5)) {
2507                         smi_info->oem_data_avail_handler =
2508                                 oem_data_avail_to_receive_msg_avail;
2509                 }
2510         }
2511 }
2512
2513 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2514 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2515 {
2516         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2517
2518         /* Make it a response */
2519         msg->rsp[0] = msg->data[0] | 4;
2520         msg->rsp[1] = msg->data[1];
2521         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2522         msg->rsp_size = 3;
2523         smi_info->curr_msg = NULL;
2524         deliver_recv_msg(smi_info, msg);
2525 }
2526
2527 /*
2528  * dell_poweredge_bt_xaction_handler
2529  * @info - smi_info.device_id must be populated
2530  *
2531  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2532  * not respond to a Get SDR command if the length of the data
2533  * requested is exactly 0x3A, which leads to command timeouts and no
2534  * data returned.  This intercepts such commands, and causes userspace
2535  * callers to try again with a different-sized buffer, which succeeds.
2536  */
2537
2538 #define STORAGE_NETFN 0x0A
2539 #define STORAGE_CMD_GET_SDR 0x23
2540 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2541                                              unsigned long unused,
2542                                              void *in)
2543 {
2544         struct smi_info *smi_info = in;
2545         unsigned char *data = smi_info->curr_msg->data;
2546         unsigned int size   = smi_info->curr_msg->data_size;
2547         if (size >= 8 &&
2548             (data[0]>>2) == STORAGE_NETFN &&
2549             data[1] == STORAGE_CMD_GET_SDR &&
2550             data[7] == 0x3A) {
2551                 return_hosed_msg_badsize(smi_info);
2552                 return NOTIFY_STOP;
2553         }
2554         return NOTIFY_DONE;
2555 }
2556
2557 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2558         .notifier_call  = dell_poweredge_bt_xaction_handler,
2559 };
2560
2561 /*
2562  * setup_dell_poweredge_bt_xaction_handler
2563  * @info - smi_info.device_id must be filled in already
2564  *
2565  * Registers the Dell PowerEdge BT transaction notifier
2566  * when this system is known to need it.
2567  */
2568 static void
2569 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2570 {
2571         struct ipmi_device_id *id = &smi_info->device_id;
2572         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2573             smi_info->si_type == SI_BT)
2574                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2575 }
2576
2577 /*
2578  * setup_oem_data_handler
2579  * @info - smi_info.device_id must be filled in already
2580  *
2581  * Fills in smi_info.oem_data_avail_handler
2582  * when we know what function to use there.
2583  */
2584
2585 static void setup_oem_data_handler(struct smi_info *smi_info)
2586 {
2587         setup_dell_poweredge_oem_data_handler(smi_info);
2588 }
2589
2590 static void setup_xaction_handlers(struct smi_info *smi_info)
2591 {
2592         setup_dell_poweredge_bt_xaction_handler(smi_info);
2593 }
2594
2595 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2596 {
2597         if (smi_info->intf) {
2598                 /* The timer and thread are only running if the
2599                    interface has been started up and registered. */
2600                 if (smi_info->thread != NULL)
2601                         kthread_stop(smi_info->thread);
2602                 del_timer_sync(&smi_info->si_timer);
2603         }
2604 }
2605
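/*
 * Legacy I/O ports to probe when nothing else has located a BMC;
 * these are the default addresses from the IPMI specification.
 */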
2606 static __devinitdata struct ipmi_default_vals
2607 {
2608         int type;
2609         int port;
2610 } ipmi_defaults[] =
2611 {
2612         { .type = SI_KCS, .port = 0xca2 },
2613         { .type = SI_SMIC, .port = 0xca9 },
2614         { .type = SI_BT, .port = 0xe4 },
2615         { .port = 0 }
2616 };
2617
2618 static __devinit void default_find_bmc(void)
2619 {
2620         struct smi_info *info;
2621         int             i;
2622
2623         for (i = 0; ; i++) {
2624                 if (!ipmi_defaults[i].port)
2625                         break;
2626
2627                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2628                 if (!info)
2629                         return;
2630
2631 #ifdef CONFIG_PPC_MERGE
2632                 if (check_legacy_ioport(ipmi_defaults[i].port))
2633                         continue;
2634 #endif
2635
2636                 info->addr_source = NULL;
2637
2638                 info->si_type = ipmi_defaults[i].type;
2639                 info->io_setup = port_setup;
2640                 info->io.addr_data = ipmi_defaults[i].port;
2641                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2642
2643                 info->io.addr = NULL;
2644                 info->io.regspacing = DEFAULT_REGSPACING;
2645                 info->io.regsize = DEFAULT_REGSPACING;
2646                 info->io.regshift = 0;
2647
2648                 if (try_smi_init(info) == 0) {
2649                         /* Found one... */
2650                         printk(KERN_INFO "ipmi_si: Found default %s state"
2651                                " machine at %s address 0x%lx\n",
2652                                si_to_str[info->si_type],
2653                                addr_space_to_str[info->io.addr_type],
2654                                info->io.addr_data);
2655                         return;
2656                 }
2657         }
2658 }
2659
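/*
 * Returns 1 if no already-registered interface uses the same address;
 * called with smi_infos_lock held.
 */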
2660 static int is_new_interface(struct smi_info *info)
2661 {
2662         struct smi_info *e;
2663
2664         list_for_each_entry(e, &smi_infos, link) {
2665                 if (e->io.addr_type != info->io.addr_type)
2666                         continue;
2667                 if (e->io.addr_data == info->io.addr_data)
2668                         return 0;
2669         }
2670
2671         return 1;
2672 }
2673
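/*
 * Bring up one interface: check for duplicates, allocate and set up
 * the state machine and I/O, probe for a BMC, then register the
 * interface with the IPMI message handler and create its proc files.
 */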
2674 static int try_smi_init(struct smi_info *new_smi)
2675 {
2676         int rv;
2677
2678         if (new_smi->addr_source) {
2679                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2680                        " machine at %s address 0x%lx, slave address 0x%x,"
2681                        " irq %d\n",
2682                        new_smi->addr_source,
2683                        si_to_str[new_smi->si_type],
2684                        addr_space_to_str[new_smi->io.addr_type],
2685                        new_smi->io.addr_data,
2686                        new_smi->slave_addr, new_smi->irq);
2687         }
2688
2689         mutex_lock(&smi_infos_lock);
2690         if (!is_new_interface(new_smi)) {
2691                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2692                 rv = -EBUSY;
2693                 goto out_err;
2694         }
2695
2696         /* So we know not to free it unless we have allocated one. */
2697         new_smi->intf = NULL;
2698         new_smi->si_sm = NULL;
2699         new_smi->handlers = NULL;
2700
2701         switch (new_smi->si_type) {
2702         case SI_KCS:
2703                 new_smi->handlers = &kcs_smi_handlers;
2704                 break;
2705
2706         case SI_SMIC:
2707                 new_smi->handlers = &smic_smi_handlers;
2708                 break;
2709
2710         case SI_BT:
2711                 new_smi->handlers = &bt_smi_handlers;
2712                 break;
2713
2714         default:
2715                 /* No support for anything else yet. */
2716                 rv = -EIO;
2717                 goto out_err;
2718         }
2719
2720         /* Allocate the state machine's data and initialize it. */
2721         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2722         if (!new_smi->si_sm) {
2723                 printk(" Could not allocate state machine memory\n");
2724                 rv = -ENOMEM;
2725                 goto out_err;
2726         }
2727         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2728                                                         &new_smi->io);
2729
2730         /* Now that we know the I/O size, we can set up the I/O. */
2731         rv = new_smi->io_setup(new_smi);
2732         if (rv) {
2733                 printk(" Could not set up I/O space\n");
2734                 goto out_err;
2735         }
2736
2737         spin_lock_init(&(new_smi->si_lock));
2738         spin_lock_init(&(new_smi->msg_lock));
2739         spin_lock_init(&(new_smi->count_lock));
2740
2741         /* Do low-level detection first. */
2742         if (new_smi->handlers->detect(new_smi->si_sm)) {
2743                 if (new_smi->addr_source)
2744                         printk(KERN_INFO "ipmi_si: Interface detection"
2745                                " failed\n");
2746                 rv = -ENODEV;
2747                 goto out_err;
2748         }
2749
2750         /* Attempt a get device id command.  If it fails, we probably
2751            don't have a BMC here. */
2752         rv = try_get_dev_id(new_smi);
2753         if (rv) {
2754                 if (new_smi->addr_source)
2755                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2756                                " at this location\n");
2757                 goto out_err;
2758         }
2759
2760         setup_oem_data_handler(new_smi);
2761         setup_xaction_handlers(new_smi);
2762
2763         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2764         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2765         new_smi->curr_msg = NULL;
2766         atomic_set(&new_smi->req_events, 0);
2767         new_smi->run_to_completion = 0;
2768
2769         new_smi->interrupt_disabled = 0;
2770         atomic_set(&new_smi->stop_operation, 0);
2771         new_smi->intf_num = smi_num;
2772         smi_num++;
2773
2774         /* Start clearing the flags before we enable interrupts or the
2775            timer to avoid racing with the timer. */
2776         start_clear_flags(new_smi);
2777         /* IRQ is defined to be set when non-zero. */
2778         if (new_smi->irq)
2779                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2780
2781         if (!new_smi->dev) {
2782                 /* If we don't already have a device from something
2783                  * else (like PCI), then register a new one. */
2784                 new_smi->pdev = platform_device_alloc("ipmi_si",
2785                                                       new_smi->intf_num);
2786                 if (!new_smi->pdev) {
2787                         rv = -ENOMEM;
2788                         printk(KERN_ERR "ipmi_si_intf:"
2789                                " Unable to allocate platform device\n");
2790                         goto out_err;
2791                 }
2792                 new_smi->dev = &new_smi->pdev->dev;
2793                 new_smi->dev->driver = &ipmi_driver;
2794
2795                 rv = platform_device_add(new_smi->pdev);
2796                 if (rv) {
2797                         printk(KERN_ERR
2798                                "ipmi_si_intf:"
2799                                " Unable to register system interface device:"
2800                                " %d\n",
2801                                rv);
2802                         goto out_err;
2803                 }
2804                 new_smi->dev_registered = 1;
2805         }
2806
2807         rv = ipmi_register_smi(&handlers,
2808                                new_smi,
2809                                &new_smi->device_id,
2810                                new_smi->dev,
2811                                "bmc",
2812                                new_smi->slave_addr);
2813         if (rv) {
2814                 printk(KERN_ERR
2815                        "ipmi_si: Unable to register device: error %d\n",
2816                        rv);
2817                 goto out_err_stop_timer;
2818         }
2819
2820         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2821                                      type_file_read_proc, NULL,
2822                                      new_smi, THIS_MODULE);
2823         if (rv) {
2824                 printk(KERN_ERR
2825                        "ipmi_si: Unable to create proc entry: %d\n",
2826                        rv);
2827                 goto out_err_stop_timer;
2828         }
2829
2830         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2831                                      stat_file_read_proc, NULL,
2832                                      new_smi, THIS_MODULE);
2833         if (rv) {
2834                 printk(KERN_ERR
2835                        "ipmi_si: Unable to create proc entry: %d\n",
2836                        rv);
2837                 goto out_err_stop_timer;
2838         }
2839
2840         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2841                                      param_read_proc, NULL,
2842                                      new_smi, THIS_MODULE);
2843         if (rv) {
2844                 printk(KERN_ERR
2845                        "ipmi_si: Unable to create proc entry: %d\n",
2846                        rv);
2847                 goto out_err_stop_timer;
2848         }
2849
2850         list_add_tail(&new_smi->link, &smi_infos);
2851
2852         mutex_unlock(&smi_infos_lock);
2853
2854         printk(KERN_INFO "IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2855
2856         return 0;
2857
2858  out_err_stop_timer:
2859         atomic_inc(&new_smi->stop_operation);
2860         wait_for_timer_and_thread(new_smi);
2861
2862  out_err:
2863         if (new_smi->intf)
2864                 ipmi_unregister_smi(new_smi->intf);
2865
2866         if (new_smi->irq_cleanup)
2867                 new_smi->irq_cleanup(new_smi);
2868
2869         /* Wait until we know that we are out of any interrupt
2870            handlers that might have been running before we freed
2871            the interrupt. */
2872         synchronize_sched();
2873
2874         if (new_smi->si_sm) {
2875                 if (new_smi->handlers)
2876                         new_smi->handlers->cleanup(new_smi->si_sm);
2877                 kfree(new_smi->si_sm);
2878         }
2879         if (new_smi->addr_source_cleanup)
2880                 new_smi->addr_source_cleanup(new_smi);
2881         if (new_smi->io_cleanup)
2882                 new_smi->io_cleanup(new_smi);
2883
2884         if (new_smi->dev_registered)
2885                 platform_device_unregister(new_smi->pdev);
2886
2887         kfree(new_smi);
2888
2889         mutex_unlock(&smi_infos_lock);
2890
2891         return rv;
2892 }
2893
2894 static __devinit int init_ipmi_si(void)
2895 {
2896         int  i;
2897         char *str;
2898         int  rv;
2899
2900         if (initialized)
2901                 return 0;
2902         initialized = 1;
2903
2904         /* Register the device drivers. */
2905         rv = driver_register(&ipmi_driver);
2906         if (rv) {
2907                 printk(KERN_ERR
2908                        "init_ipmi_si: Unable to register driver: %d\n",
2909                        rv);
2910                 return rv;
2911         }
2912
2913
2914         /* Parse out the si_type string into its components. */
2915         str = si_type_str;
2916         if (*str != '\0') {
2917                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2918                         si_type[i] = str;
2919                         str = strchr(str, ',');
2920                         if (str) {
2921                                 *str = '\0';
2922                                 str++;
2923                         } else {
2924                                 break;
2925                         }
2926                 }
2927         }
2928
2929         printk(KERN_INFO "IPMI System Interface driver.\n");
2930
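             /* Probe for interfaces: user-supplied (hardcoded) parameters
                first, then firmware tables (DMI, ACPI), then the PCI and
                OpenFirmware bus drivers where configured. */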
2931         hardcode_find_bmc();
2932
2933 #ifdef CONFIG_DMI
2934         dmi_find_bmc();
2935 #endif
2936
2937 #ifdef CONFIG_ACPI
2938         acpi_find_bmc();
2939 #endif
2940
2941 #ifdef CONFIG_PCI
2942         rv = pci_register_driver(&ipmi_pci_driver);
2943         if (rv) {
2944                 printk(KERN_ERR
2945                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2946                        rv);
2947         }
2948 #endif
2949
2950 #ifdef CONFIG_PPC_OF
2951         of_register_platform_driver(&ipmi_of_platform_driver);
2952 #endif
2953
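             /* If nothing was found above and the user has not disabled
                it, try the standard default interface locations. */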
2954         if (si_trydefaults) {
2955                 mutex_lock(&smi_infos_lock);
2956                 if (list_empty(&smi_infos)) {
2957                         /* No BMC was found, try defaults. */
2958                         mutex_unlock(&smi_infos_lock);
2959                         default_find_bmc();
2960                 } else {
2961                         mutex_unlock(&smi_infos_lock);
2962                 }
2963         }
2964
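             /* If we still have no interfaces and unload_when_empty is
                set, undo the driver registrations and fail the load. */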
2965         mutex_lock(&smi_infos_lock);
2966         if (unload_when_empty && list_empty(&smi_infos)) {
2967                 mutex_unlock(&smi_infos_lock);
2968 #ifdef CONFIG_PCI
2969                 pci_unregister_driver(&ipmi_pci_driver);
2970 #endif
2971
2972 #ifdef CONFIG_PPC_OF
2973                 of_unregister_platform_driver(&ipmi_of_platform_driver);
2974 #endif
2975                 driver_unregister(&ipmi_driver);
2976                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2977                 return -ENODEV;
2978         } else {
2979                 mutex_unlock(&smi_infos_lock);
2980                 return 0;
2981         }
2982 }
2983 module_init(init_ipmi_si);
2984
2985 static void cleanup_one_si(struct smi_info *to_clean)
2986 {
2987         int           rv;
2988         unsigned long flags;
2989
2990         if (!to_clean)
2991                 return;
2992
2993         list_del(&to_clean->link);
2994
2995         /* Tell the driver that we are shutting down. */
2996         atomic_inc(&to_clean->stop_operation);
2997
2998         /* Make sure the timer and thread are stopped and will not run
2999            again. */
3000         wait_for_timer_and_thread(to_clean);
3001
3002         /* Timeouts are stopped, now make sure the interrupts are off
3003            for the device.  A little tricky with locks to make sure
3004            there are no races. */
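             /* The loop below drops the lock around poll() so the state
                machine can make progress, then re-takes it before
                re-checking the state. */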
3005         spin_lock_irqsave(&to_clean->si_lock, flags);
3006         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3007                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3008                 poll(to_clean);
3009                 schedule_timeout_uninterruptible(1);
3010                 spin_lock_irqsave(&to_clean->si_lock, flags);
3011         }
3012         disable_si_irq(to_clean);
3013         spin_unlock_irqrestore(&to_clean->si_lock, flags);
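             /* Disabling interrupts may itself have started a transaction
                with the BMC; let it (and anything else in flight) run to
                completion. */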
3014         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3015                 poll(to_clean);
3016                 schedule_timeout_uninterruptible(1);
3017         }
3018
3019         /* Clean up interrupts and make sure that everything is done. */
3020         if (to_clean->irq_cleanup)
3021                 to_clean->irq_cleanup(to_clean);
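             /* Flush anything that an interrupt which fired just before
                the handler was removed may have left pending. */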
3022         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3023                 poll(to_clean);
3024                 schedule_timeout_uninterruptible(1);
3025         }
3026
3027         rv = ipmi_unregister_smi(to_clean->intf);
3028         if (rv) {
3029                 printk(KERN_ERR
3030                        "ipmi_si: Unable to unregister device: errno=%d\n",
3031                        rv);
3032         }
3033
3034         to_clean->handlers->cleanup(to_clean->si_sm);
3035
3036         kfree(to_clean->si_sm);
3037
3038         if (to_clean->addr_source_cleanup)
3039                 to_clean->addr_source_cleanup(to_clean);
3040         if (to_clean->io_cleanup)
3041                 to_clean->io_cleanup(to_clean);
3042
3043         if (to_clean->dev_registered)
3044                 platform_device_unregister(to_clean->pdev);
3045
3046         kfree(to_clean);
3047 }
3048
3049 static __exit void cleanup_ipmi_si(void)
3050 {
3051         struct smi_info *e, *tmp_e;
3052
3053         if (!initialized)
3054                 return;
3055
3056 #ifdef CONFIG_PCI
3057         pci_unregister_driver(&ipmi_pci_driver);
3058 #endif
3059
3060 #ifdef CONFIG_PPC_OF
3061         of_unregister_platform_driver(&ipmi_of_platform_driver);
3062 #endif
3063
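             /* Tear down every remaining interface; cleanup_one_si()
                removes each entry from the smi_infos list. */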
3064         mutex_lock(&smi_infos_lock);
3065         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3066                 cleanup_one_si(e);
3067         mutex_unlock(&smi_infos_lock);
3068
3069         driver_unregister(&ipmi_driver);
3070 }
3071 module_exit(cleanup_ipmi_si);
3072
3073 MODULE_LICENSE("GPL");
3074 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3075 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");