[PATCH] IPMI: system interface hotplug
drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <asm/system.h>
44 #include <linux/sched.h>
45 #include <linux/timer.h>
46 #include <linux/errno.h>
47 #include <linux/spinlock.h>
48 #include <linux/slab.h>
49 #include <linux/delay.h>
50 #include <linux/list.h>
51 #include <linux/pci.h>
52 #include <linux/ioport.h>
53 #include <linux/notifier.h>
54 #include <linux/mutex.h>
55 #include <linux/kthread.h>
56 #include <asm/irq.h>
57 #include <linux/interrupt.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ipmi_smi.h>
60 #include <asm/io.h>
61 #include "ipmi_si_sm.h"
62 #include <linux/init.h>
63 #include <linux/dmi.h>
64 #include <linux/string.h>
65 #include <linux/ctype.h>
66
67 #define PFX "ipmi_si: "
68
69 /* Measure times between events in the driver. */
70 #undef DEBUG_TIMING
71
72 /* Call every 10 ms. */
73 #define SI_TIMEOUT_TIME_USEC    10000
74 #define SI_USEC_PER_JIFFY       (1000000/HZ)
75 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
76 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
77                                        short timeout */
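/*
 * Editorial note (illustrative arithmetic, not in the original source):
 * with HZ=1000, SI_USEC_PER_JIFFY is 1000 and SI_TIMEOUT_JIFFIES is 10;
 * with HZ=100 they are 10000 and 1, so the timer fires roughly every
 * 10 ms either way.  SI_SHORT_TIMEOUT_USEC is only used with udelay()
 * in the run-to-completion paths below.
 */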
78
79 enum si_intf_state {
80         SI_NORMAL,
81         SI_GETTING_FLAGS,
82         SI_GETTING_EVENTS,
83         SI_CLEARING_FLAGS,
84         SI_CLEARING_FLAGS_THEN_SET_IRQ,
85         SI_GETTING_MESSAGES,
86         SI_ENABLE_INTERRUPTS1,
87         SI_ENABLE_INTERRUPTS2
88         /* FIXME - add watchdog stuff. */
89 };
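/*
 * Editorial note: si_state records which request the driver is currently
 * waiting on.  handle_transaction_done() switches on it to interpret the
 * response, and handle_flags() chooses the next state, falling back to
 * SI_NORMAL when nothing is pending.
 */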
90
91 /* Some BT-specific defines we need here. */
92 #define IPMI_BT_INTMASK_REG             2
93 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
94 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
95
96 enum si_type {
97     SI_KCS, SI_SMIC, SI_BT
98 };
99 static char *si_to_str[] = { "kcs", "smic", "bt" };
100
101 #define DEVICE_NAME "ipmi_si"
102
103 static struct device_driver ipmi_driver =
104 {
105         .name = DEVICE_NAME,
106         .bus = &platform_bus_type
107 };
108
109 struct smi_info
110 {
111         int                    intf_num;
112         ipmi_smi_t             intf;
113         struct si_sm_data      *si_sm;
114         struct si_sm_handlers  *handlers;
115         enum si_type           si_type;
116         spinlock_t             si_lock;
117         spinlock_t             msg_lock;
118         struct list_head       xmit_msgs;
119         struct list_head       hp_xmit_msgs;
120         struct ipmi_smi_msg    *curr_msg;
121         enum si_intf_state     si_state;
122
123         /* Used to handle the various types of I/O that can occur with
124            IPMI */
125         struct si_sm_io io;
126         int (*io_setup)(struct smi_info *info);
127         void (*io_cleanup)(struct smi_info *info);
128         int (*irq_setup)(struct smi_info *info);
129         void (*irq_cleanup)(struct smi_info *info);
130         unsigned int io_size;
131         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132         void (*addr_source_cleanup)(struct smi_info *info);
133         void *addr_source_data;
134
135         /* Per-OEM handler, called from handle_flags().
136            Returns 1 when handle_flags() needs to be re-run
137            or 0 indicating it set si_state itself.
138         */
139         int (*oem_data_avail_handler)(struct smi_info *smi_info);
140
141         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142            is set to hold the flags until we are done handling everything
143            from the flags. */
144 #define RECEIVE_MSG_AVAIL       0x01
145 #define EVENT_MSG_BUFFER_FULL   0x02
146 #define WDT_PRE_TIMEOUT_INT     0x08
147 #define OEM0_DATA_AVAIL     0x20
148 #define OEM1_DATA_AVAIL     0x40
149 #define OEM2_DATA_AVAIL     0x80
150 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
151                              OEM1_DATA_AVAIL | \
152                              OEM2_DATA_AVAIL)
153         unsigned char       msg_flags;
154
155         /* If set to true, this will request events the next time the
156            state machine is idle. */
157         atomic_t            req_events;
158
159         /* If true, run the state machine to completion on every send
160            call.  Generally used after a panic to make sure stuff goes
161            out. */
162         int                 run_to_completion;
163
164         /* The I/O port of an SI interface. */
165         int                 port;
166
167         /* The space between start addresses of the two ports.  For
168            instance, if the first port is 0xca2 and the spacing is 4, then
169            the second port is 0xca6. */
170         unsigned int        spacing;
171
172         /* zero if no irq; */
173         int                 irq;
174
175         /* The timer for this si. */
176         struct timer_list   si_timer;
177
178         /* The time (in jiffies) the last timeout occurred at. */
179         unsigned long       last_timeout_jiffies;
180
181         /* Used to gracefully stop the timer without race conditions. */
182         atomic_t            stop_operation;
183
184         /* The driver will disable interrupts when it gets into a
185            situation where it cannot handle messages due to lack of
186            memory.  Once that situation clears up, it will re-enable
187            interrupts. */
188         int interrupt_disabled;
189
190         /* From the get device id response... */
191         struct ipmi_device_id device_id;
192
193         /* Driver model stuff. */
194         struct device *dev;
195         struct platform_device *pdev;
196
197          /* True if we allocated the device, false if it came from
198           * someplace else (like PCI). */
199         int dev_registered;
200
201         /* Slave address, could be reported from DMI. */
202         unsigned char slave_addr;
203
204         /* Counters and things for the proc filesystem. */
205         spinlock_t count_lock;
206         unsigned long short_timeouts;
207         unsigned long long_timeouts;
208         unsigned long timeout_restarts;
209         unsigned long idles;
210         unsigned long interrupts;
211         unsigned long attentions;
212         unsigned long flag_fetches;
213         unsigned long hosed_count;
214         unsigned long complete_transactions;
215         unsigned long events;
216         unsigned long watchdog_pretimeouts;
217         unsigned long incoming_messages;
218
219         struct task_struct *thread;
220
221         struct list_head link;
222 };
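/*
 * Editorial note on locking, derived from the code below: si_lock guards
 * the state machine and si_state, msg_lock guards the xmit_msgs and
 * hp_xmit_msgs queues, and count_lock guards the statistics counters.
 * start_next_msg() takes msg_lock with si_lock already held; sender()
 * never holds both locks at once when queueing.
 */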
223
224 #define SI_MAX_PARMS 4
225
226 static int force_kipmid[SI_MAX_PARMS];
227 static int num_force_kipmid;
228
229 static int unload_when_empty = 1;
230
231 static int try_smi_init(struct smi_info *smi);
232 static void cleanup_one_si(struct smi_info *to_clean);
233
234 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
235 static int register_xaction_notifier(struct notifier_block * nb)
236 {
237         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
238 }
239
240 static void deliver_recv_msg(struct smi_info *smi_info,
241                              struct ipmi_smi_msg *msg)
242 {
243         /* Deliver the message to the upper layer with the lock
244            released. */
245         spin_unlock(&(smi_info->si_lock));
246         ipmi_smi_msg_received(smi_info->intf, msg);
247         spin_lock(&(smi_info->si_lock));
248 }
249
250 static void return_hosed_msg(struct smi_info *smi_info)
251 {
252         struct ipmi_smi_msg *msg = smi_info->curr_msg;
253
254         /* Make it a response */
255         msg->rsp[0] = msg->data[0] | 4;
256         msg->rsp[1] = msg->data[1];
257         msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
258         msg->rsp_size = 3;
259
260         smi_info->curr_msg = NULL;
261         deliver_recv_msg(smi_info, msg);
262 }
263
264 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
265 {
266         int              rv;
267         struct list_head *entry = NULL;
268 #ifdef DEBUG_TIMING
269         struct timeval t;
270 #endif
271
272         /* No need to save flags, we already have interrupts off and we
273            already hold the SMI lock. */
274         spin_lock(&(smi_info->msg_lock));
275
276         /* Pick the high priority queue first. */
277         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
278                 entry = smi_info->hp_xmit_msgs.next;
279         } else if (!list_empty(&(smi_info->xmit_msgs))) {
280                 entry = smi_info->xmit_msgs.next;
281         }
282
283         if (!entry) {
284                 smi_info->curr_msg = NULL;
285                 rv = SI_SM_IDLE;
286         } else {
287                 int err;
288
289                 list_del(entry);
290                 smi_info->curr_msg = list_entry(entry,
291                                                 struct ipmi_smi_msg,
292                                                 link);
293 #ifdef DEBUG_TIMING
294                 do_gettimeofday(&t);
295                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
296 #endif
297                 err = atomic_notifier_call_chain(&xaction_notifier_list,
298                                 0, smi_info);
299                 if (err & NOTIFY_STOP_MASK) {
300                         rv = SI_SM_CALL_WITHOUT_DELAY;
301                         goto out;
302                 }
303                 err = smi_info->handlers->start_transaction(
304                         smi_info->si_sm,
305                         smi_info->curr_msg->data,
306                         smi_info->curr_msg->data_size);
307                 if (err) {
308                         return_hosed_msg(smi_info);
309                 }
310
311                 rv = SI_SM_CALL_WITHOUT_DELAY;
312         }
313         out:
314         spin_unlock(&(smi_info->msg_lock));
315
316         return rv;
317 }
318
319 static void start_enable_irq(struct smi_info *smi_info)
320 {
321         unsigned char msg[2];
322
323         /* If we are enabling interrupts, we have to tell the
324            BMC to use them. */
325         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
326         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
327
328         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
329         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
330 }
331
332 static void start_clear_flags(struct smi_info *smi_info)
333 {
334         unsigned char msg[3];
335
336         /* Make sure the watchdog pre-timeout flag is not set at startup. */
337         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
338         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
339         msg[2] = WDT_PRE_TIMEOUT_INT;
340
341         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
342         smi_info->si_state = SI_CLEARING_FLAGS;
343 }
344
345 /* When we have a situation where we run out of memory and cannot
346    allocate messages, we just leave them in the BMC and run the system
347    polled until we can allocate some memory.  Once we have some
348    memory, we will re-enable the interrupt. */
349 static inline void disable_si_irq(struct smi_info *smi_info)
350 {
351         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
352                 disable_irq_nosync(smi_info->irq);
353                 smi_info->interrupt_disabled = 1;
354         }
355 }
356
357 static inline void enable_si_irq(struct smi_info *smi_info)
358 {
359         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
360                 enable_irq(smi_info->irq);
361                 smi_info->interrupt_disabled = 0;
362         }
363 }
364
365 static void handle_flags(struct smi_info *smi_info)
366 {
367  retry:
368         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
369                 /* Watchdog pre-timeout */
370                 spin_lock(&smi_info->count_lock);
371                 smi_info->watchdog_pretimeouts++;
372                 spin_unlock(&smi_info->count_lock);
373
374                 start_clear_flags(smi_info);
375                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
376                 spin_unlock(&(smi_info->si_lock));
377                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
378                 spin_lock(&(smi_info->si_lock));
379         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
380                 /* Messages available. */
381                 smi_info->curr_msg = ipmi_alloc_smi_msg();
382                 if (!smi_info->curr_msg) {
383                         disable_si_irq(smi_info);
384                         smi_info->si_state = SI_NORMAL;
385                         return;
386                 }
387                 enable_si_irq(smi_info);
388
389                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
390                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
391                 smi_info->curr_msg->data_size = 2;
392
393                 smi_info->handlers->start_transaction(
394                         smi_info->si_sm,
395                         smi_info->curr_msg->data,
396                         smi_info->curr_msg->data_size);
397                 smi_info->si_state = SI_GETTING_MESSAGES;
398         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
399                 /* Events available. */
400                 smi_info->curr_msg = ipmi_alloc_smi_msg();
401                 if (!smi_info->curr_msg) {
402                         disable_si_irq(smi_info);
403                         smi_info->si_state = SI_NORMAL;
404                         return;
405                 }
406                 enable_si_irq(smi_info);
407
408                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
409                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
410                 smi_info->curr_msg->data_size = 2;
411
412                 smi_info->handlers->start_transaction(
413                         smi_info->si_sm,
414                         smi_info->curr_msg->data,
415                         smi_info->curr_msg->data_size);
416                 smi_info->si_state = SI_GETTING_EVENTS;
417         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
418                    smi_info->oem_data_avail_handler) {
419                 if (smi_info->oem_data_avail_handler(smi_info))
420                         goto retry;
421         } else {
422                 smi_info->si_state = SI_NORMAL;
423         }
424 }
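/*
 * Editorial note: handle_flags() services one condition per pass, in
 * priority order: watchdog pre-timeout, then queued messages, then the
 * event buffer, then OEM-specific data.  The OEM handler requests
 * another pass by returning 1 (the "goto retry" above).
 */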
425
426 static void handle_transaction_done(struct smi_info *smi_info)
427 {
428         struct ipmi_smi_msg *msg;
429 #ifdef DEBUG_TIMING
430         struct timeval t;
431
432         do_gettimeofday(&t);
433         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
434 #endif
435         switch (smi_info->si_state) {
436         case SI_NORMAL:
437                 if (!smi_info->curr_msg)
438                         break;
439
440                 smi_info->curr_msg->rsp_size
441                         = smi_info->handlers->get_result(
442                                 smi_info->si_sm,
443                                 smi_info->curr_msg->rsp,
444                                 IPMI_MAX_MSG_LENGTH);
445
446                 /* Do this here because deliver_recv_msg() releases the
447                    lock, and a new message can be put in during the
448                    time the lock is released. */
449                 msg = smi_info->curr_msg;
450                 smi_info->curr_msg = NULL;
451                 deliver_recv_msg(smi_info, msg);
452                 break;
453
454         case SI_GETTING_FLAGS:
455         {
456                 unsigned char msg[4];
457                 unsigned int  len;
458
459                 /* We got the flags from the SMI, now handle them. */
460                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
461                 if (msg[2] != 0) {
462                         /* Error fetching flags, just give up for
463                            now. */
464                         smi_info->si_state = SI_NORMAL;
465                 } else if (len < 4) {
466                         /* Hmm, no flags.  That's technically illegal, but
467                            don't use uninitialized data. */
468                         smi_info->si_state = SI_NORMAL;
469                 } else {
470                         smi_info->msg_flags = msg[3];
471                         handle_flags(smi_info);
472                 }
473                 break;
474         }
475
476         case SI_CLEARING_FLAGS:
477         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
478         {
479                 unsigned char msg[3];
480
481                 /* We cleared the flags. */
482                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
483                 if (msg[2] != 0) {
484                         /* Error clearing flags */
485                         printk(KERN_WARNING
486                                "ipmi_si: Error clearing flags: %2.2x\n",
487                                msg[2]);
488                 }
489                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
490                         start_enable_irq(smi_info);
491                 else
492                         smi_info->si_state = SI_NORMAL;
493                 break;
494         }
495
496         case SI_GETTING_EVENTS:
497         {
498                 smi_info->curr_msg->rsp_size
499                         = smi_info->handlers->get_result(
500                                 smi_info->si_sm,
501                                 smi_info->curr_msg->rsp,
502                                 IPMI_MAX_MSG_LENGTH);
503
504                 /* Do this here because deliver_recv_msg() releases the
505                    lock, and a new message can be put in during the
506                    time the lock is released. */
507                 msg = smi_info->curr_msg;
508                 smi_info->curr_msg = NULL;
509                 if (msg->rsp[2] != 0) {
510                         /* Error getting event, probably done. */
511                         msg->done(msg);
512
513                         /* Take off the event flag. */
514                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
515                         handle_flags(smi_info);
516                 } else {
517                         spin_lock(&smi_info->count_lock);
518                         smi_info->events++;
519                         spin_unlock(&smi_info->count_lock);
520
521                         /* Do this before we deliver the message
522                            because delivering the message releases the
523                            lock and something else can mess with the
524                            state. */
525                         handle_flags(smi_info);
526
527                         deliver_recv_msg(smi_info, msg);
528                 }
529                 break;
530         }
531
532         case SI_GETTING_MESSAGES:
533         {
534                 smi_info->curr_msg->rsp_size
535                         = smi_info->handlers->get_result(
536                                 smi_info->si_sm,
537                                 smi_info->curr_msg->rsp,
538                                 IPMI_MAX_MSG_LENGTH);
539
540                 /* Do this here because deliver_recv_msg() releases the
541                    lock, and a new message can be put in during the
542                    time the lock is released. */
543                 msg = smi_info->curr_msg;
544                 smi_info->curr_msg = NULL;
545                 if (msg->rsp[2] != 0) {
546                         /* Error getting message, probably done. */
547                         msg->done(msg);
548
549                         /* Take off the msg flag. */
550                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
551                         handle_flags(smi_info);
552                 } else {
553                         spin_lock(&smi_info->count_lock);
554                         smi_info->incoming_messages++;
555                         spin_unlock(&smi_info->count_lock);
556
557                         /* Do this before we deliver the message
558                            because delivering the message releases the
559                            lock and something else can mess with the
560                            state. */
561                         handle_flags(smi_info);
562
563                         deliver_recv_msg(smi_info, msg);
564                 }
565                 break;
566         }
567
568         case SI_ENABLE_INTERRUPTS1:
569         {
570                 unsigned char msg[4];
571
572                 /* Got the current global enables; now set the msg queue interrupt bit. */
573                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
574                 if (msg[2] != 0) {
575                         printk(KERN_WARNING
576                                "ipmi_si: Could not enable interrupts"
577                                ", failed get, using polled mode.\n");
578                         smi_info->si_state = SI_NORMAL;
579                 } else {
580                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
581                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
582                         msg[2] = msg[3] | 1; /* enable msg queue int */
583                         smi_info->handlers->start_transaction(
584                                 smi_info->si_sm, msg, 3);
585                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
586                 }
587                 break;
588         }
589
590         case SI_ENABLE_INTERRUPTS2:
591         {
592                 unsigned char msg[4];
593
594                 /* Check the result of setting the global enables. */
595                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
596                 if (msg[2] != 0) {
597                         printk(KERN_WARNING
598                                "ipmi_si: Could not enable interrupts"
599                                ", failed set, using polled mode.\n");
600                 }
601                 smi_info->si_state = SI_NORMAL;
602                 break;
603         }
604         }
605 }
606
607 /* Called on timeouts and events.  Timeouts should pass the elapsed
608    time, interrupts should pass in zero. */
609 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
610                                            int time)
611 {
612         enum si_sm_result si_sm_result;
613
614  restart:
615         /* There used to be a loop here that waited a little while
616            (around 25us) before giving up.  That turned out to be
617            pointless, the minimum delays I was seeing were in the 300us
618            range, which is far too long to wait in an interrupt.  So
619            we just run until the state machine tells us something
620            happened or it needs a delay. */
621         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
622         time = 0;
623         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
624         {
625                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
626         }
627
628         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
629         {
630                 spin_lock(&smi_info->count_lock);
631                 smi_info->complete_transactions++;
632                 spin_unlock(&smi_info->count_lock);
633
634                 handle_transaction_done(smi_info);
635                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
636         }
637         else if (si_sm_result == SI_SM_HOSED)
638         {
639                 spin_lock(&smi_info->count_lock);
640                 smi_info->hosed_count++;
641                 spin_unlock(&smi_info->count_lock);
642
643                 /* Do this before return_hosed_msg(), because that
644                    releases the lock. */
645                 smi_info->si_state = SI_NORMAL;
646                 if (smi_info->curr_msg != NULL) {
647                         /* If we were handling a user message, format
648                            a response to send to the upper layer to
649                            tell it about the error. */
650                         return_hosed_msg(smi_info);
651                 }
652                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
653         }
654
655         /* We prefer handling attn over new messages. */
656         if (si_sm_result == SI_SM_ATTN)
657         {
658                 unsigned char msg[2];
659
660                 spin_lock(&smi_info->count_lock);
661                 smi_info->attentions++;
662                 spin_unlock(&smi_info->count_lock);
663
664                 /* Got an attn, send down a get message flags to see
665                    what's causing it.  It would be better to handle
666                    this in the upper layer, but due to the way
667                    interrupts work with the SMI, that's not really
668                    possible. */
669                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
670                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
671
672                 smi_info->handlers->start_transaction(
673                         smi_info->si_sm, msg, 2);
674                 smi_info->si_state = SI_GETTING_FLAGS;
675                 goto restart;
676         }
677
678         /* If we are currently idle, try to start the next message. */
679         if (si_sm_result == SI_SM_IDLE) {
680                 spin_lock(&smi_info->count_lock);
681                 smi_info->idles++;
682                 spin_unlock(&smi_info->count_lock);
683
684                 si_sm_result = start_next_msg(smi_info);
685                 if (si_sm_result != SI_SM_IDLE)
686                         goto restart;
687         }
688
689         if ((si_sm_result == SI_SM_IDLE)
690             && (atomic_read(&smi_info->req_events)))
691         {
692                 /* We are idle and the upper layer requested that I fetch
693                    events, so do so. */
694                 atomic_set(&smi_info->req_events, 0);
695
696                 smi_info->curr_msg = ipmi_alloc_smi_msg();
697                 if (!smi_info->curr_msg)
698                         goto out;
699
700                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
701                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
702                 smi_info->curr_msg->data_size = 2;
703
704                 smi_info->handlers->start_transaction(
705                         smi_info->si_sm,
706                         smi_info->curr_msg->data,
707                         smi_info->curr_msg->data_size);
708                 smi_info->si_state = SI_GETTING_EVENTS;
709                 goto restart;
710         }
711  out:
712         return si_sm_result;
713 }
714
715 static void sender(void                *send_info,
716                    struct ipmi_smi_msg *msg,
717                    int                 priority)
718 {
719         struct smi_info   *smi_info = send_info;
720         enum si_sm_result result;
721         unsigned long     flags;
722 #ifdef DEBUG_TIMING
723         struct timeval    t;
724 #endif
725
726         if (atomic_read(&smi_info->stop_operation)) {
727                 msg->rsp[0] = msg->data[0] | 4;
728                 msg->rsp[1] = msg->data[1];
729                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
730                 msg->rsp_size = 3;
731                 deliver_recv_msg(smi_info, msg);
732                 return;
733         }
734
735         spin_lock_irqsave(&(smi_info->msg_lock), flags);
736 #ifdef DEBUG_TIMING
737         do_gettimeofday(&t);
738         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
739 #endif
740
741         if (smi_info->run_to_completion) {
742                 /* If we are running to completion, then throw it in
743                    the list and run transactions until everything is
744                    clear.  Priority doesn't matter here. */
745                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
746
747                 /* We have to release the msg lock and claim the smi
748                    lock in this case, because of race conditions. */
749                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
750
751                 spin_lock_irqsave(&(smi_info->si_lock), flags);
752                 result = smi_event_handler(smi_info, 0);
753                 while (result != SI_SM_IDLE) {
754                         udelay(SI_SHORT_TIMEOUT_USEC);
755                         result = smi_event_handler(smi_info,
756                                                    SI_SHORT_TIMEOUT_USEC);
757                 }
758                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
759                 return;
760         } else {
761                 if (priority > 0) {
762                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
763                 } else {
764                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
765                 }
766         }
767         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
768
769         spin_lock_irqsave(&(smi_info->si_lock), flags);
770         if ((smi_info->si_state == SI_NORMAL)
771             && (smi_info->curr_msg == NULL))
772         {
773                 start_next_msg(smi_info);
774         }
775         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
776 }
777
778 static void set_run_to_completion(void *send_info, int i_run_to_completion)
779 {
780         struct smi_info   *smi_info = send_info;
781         enum si_sm_result result;
782         unsigned long     flags;
783
784         spin_lock_irqsave(&(smi_info->si_lock), flags);
785
786         smi_info->run_to_completion = i_run_to_completion;
787         if (i_run_to_completion) {
788                 result = smi_event_handler(smi_info, 0);
789                 while (result != SI_SM_IDLE) {
790                         udelay(SI_SHORT_TIMEOUT_USEC);
791                         result = smi_event_handler(smi_info,
792                                                    SI_SHORT_TIMEOUT_USEC);
793                 }
794         }
795
796         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
797 }
798
799 static int ipmi_thread(void *data)
800 {
801         struct smi_info *smi_info = data;
802         unsigned long flags;
803         enum si_sm_result smi_result;
804
805         set_user_nice(current, 19);
806         while (!kthread_should_stop()) {
807                 spin_lock_irqsave(&(smi_info->si_lock), flags);
808                 smi_result = smi_event_handler(smi_info, 0);
809                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
810                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
811                         /* do nothing */
812                 }
813                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
814                         schedule();
815                 else
816                         schedule_timeout_interruptible(1);
817         }
818         return 0;
819 }
820
821
822 static void poll(void *send_info)
823 {
824         struct smi_info *smi_info = send_info;
825
826         /*
827          * Make sure there is some delay in the poll loop so we can
828          * drive time forward and timeout things.
829          */
830         udelay(10);
831         smi_event_handler(smi_info, 10);
832 }
833
834 static void request_events(void *send_info)
835 {
836         struct smi_info *smi_info = send_info;
837
838         if (atomic_read(&smi_info->stop_operation))
839                 return;
840
841         atomic_set(&smi_info->req_events, 1);
842 }
843
844 static int initialized = 0;
845
846 static void smi_timeout(unsigned long data)
847 {
848         struct smi_info   *smi_info = (struct smi_info *) data;
849         enum si_sm_result smi_result;
850         unsigned long     flags;
851         unsigned long     jiffies_now;
852         long              time_diff;
853 #ifdef DEBUG_TIMING
854         struct timeval    t;
855 #endif
856
857         if (atomic_read(&smi_info->stop_operation))
858                 return;
859
860         spin_lock_irqsave(&(smi_info->si_lock), flags);
861 #ifdef DEBUG_TIMING
862         do_gettimeofday(&t);
863         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
864 #endif
865         jiffies_now = jiffies;
866         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
867                      * SI_USEC_PER_JIFFY);
868         smi_result = smi_event_handler(smi_info, time_diff);
869
870         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
871
872         smi_info->last_timeout_jiffies = jiffies_now;
873
874         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
875                 /* Running with interrupts, only do long timeouts. */
876                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
877                 spin_lock_irqsave(&smi_info->count_lock, flags);
878                 smi_info->long_timeouts++;
879                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
880                 goto do_add_timer;
881         }
882
883         /* If the state machine asks for a short delay, then shorten
884            the timer timeout. */
885         if (smi_result == SI_SM_CALL_WITH_DELAY) {
886                 spin_lock_irqsave(&smi_info->count_lock, flags);
887                 smi_info->short_timeouts++;
888                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
889                 smi_info->si_timer.expires = jiffies + 1;
890         } else {
891                 spin_lock_irqsave(&smi_info->count_lock, flags);
892                 smi_info->long_timeouts++;
893                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
894                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
895         }
896
897  do_add_timer:
898         add_timer(&(smi_info->si_timer));
899 }
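/*
 * Editorial note: with a working interrupt the timer is only a ~10 ms
 * watchdog (SI_TIMEOUT_JIFFIES); when polling, SI_SM_CALL_WITH_DELAY
 * shortens the next expiry to jiffies + 1 so the state machine is
 * serviced on the very next tick.
 */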
900
901 static irqreturn_t si_irq_handler(int irq, void *data)
902 {
903         struct smi_info *smi_info = data;
904         unsigned long   flags;
905 #ifdef DEBUG_TIMING
906         struct timeval  t;
907 #endif
908
909         spin_lock_irqsave(&(smi_info->si_lock), flags);
910
911         spin_lock(&smi_info->count_lock);
912         smi_info->interrupts++;
913         spin_unlock(&smi_info->count_lock);
914
915         if (atomic_read(&smi_info->stop_operation))
916                 goto out;
917
918 #ifdef DEBUG_TIMING
919         do_gettimeofday(&t);
920         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
921 #endif
922         smi_event_handler(smi_info, 0);
923  out:
924         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
925         return IRQ_HANDLED;
926 }
927
928 static irqreturn_t si_bt_irq_handler(int irq, void *data)
929 {
930         struct smi_info *smi_info = data;
931         /* We need to clear the IRQ flag for the BT interface. */
932         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
933                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
934                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
935         return si_irq_handler(irq, data);
936 }
937
938 static int smi_start_processing(void       *send_info,
939                                 ipmi_smi_t intf)
940 {
941         struct smi_info *new_smi = send_info;
942         int             enable = 0;
943
944         new_smi->intf = intf;
945
946         /* Set up the timer that drives the interface. */
947         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
948         new_smi->last_timeout_jiffies = jiffies;
949         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
950
951         /*
952          * Check if the user forcefully enabled the daemon.
953          */
954         if (new_smi->intf_num < num_force_kipmid)
955                 enable = force_kipmid[new_smi->intf_num];
956         /*
957          * The BT interface is efficient enough to not need a thread,
958          * and there is no need for a thread if we have interrupts.
959          */
960         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
961                 enable = 1;
962
963         if (enable) {
964                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
965                                               "kipmi%d", new_smi->intf_num);
966                 if (IS_ERR(new_smi->thread)) {
967                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
968                                " kernel thread due to error %ld, only using"
969                                " timers to drive the interface\n",
970                                PTR_ERR(new_smi->thread));
971                         new_smi->thread = NULL;
972                 }
973         }
974
975         return 0;
976 }
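/*
 * Editorial note: the kipmi%d thread is only started for interfaces that
 * are not BT and have no interrupt; force_kipmid=0/1 overrides that
 * heuristic per interface.
 */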
977
978 static void set_maintenance_mode(void *send_info, int enable)
979 {
980         struct smi_info   *smi_info = send_info;
981
982         if (!enable)
983                 atomic_set(&smi_info->req_events, 0);
984 }
985
986 static struct ipmi_smi_handlers handlers =
987 {
988         .owner                  = THIS_MODULE,
989         .start_processing       = smi_start_processing,
990         .sender                 = sender,
991         .request_events         = request_events,
992         .set_maintenance_mode   = set_maintenance_mode,
993         .set_run_to_completion  = set_run_to_completion,
994         .poll                   = poll,
995 };
996
997 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
998    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
999
1000 static LIST_HEAD(smi_infos);
1001 static DEFINE_MUTEX(smi_infos_lock);
1002 static int smi_num; /* Used to sequence the SMIs */
1003
1004 #define DEFAULT_REGSPACING      1
1005
1006 static int           si_trydefaults = 1;
1007 static char          *si_type[SI_MAX_PARMS];
1008 #define MAX_SI_TYPE_STR 30
1009 static char          si_type_str[MAX_SI_TYPE_STR];
1010 static unsigned long addrs[SI_MAX_PARMS];
1011 static int num_addrs;
1012 static unsigned int  ports[SI_MAX_PARMS];
1013 static int num_ports;
1014 static int           irqs[SI_MAX_PARMS];
1015 static int num_irqs;
1016 static int           regspacings[SI_MAX_PARMS];
1017 static int num_regspacings = 0;
1018 static int           regsizes[SI_MAX_PARMS];
1019 static int num_regsizes = 0;
1020 static int           regshifts[SI_MAX_PARMS];
1021 static int num_regshifts = 0;
1022 static int slave_addrs[SI_MAX_PARMS];
1023 static int num_slave_addrs = 0;
1024
1025 #define IPMI_IO_ADDR_SPACE  0
1026 #define IPMI_MEM_ADDR_SPACE 1
1027 static char *addr_space_to_str[] = { "I/O", "mem" };
1028
1029 static int hotmod_handler(const char *val, struct kernel_param *kp);
1030
1031 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1032 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1033                  " Documentation/IPMI.txt in the kernel sources for the"
1034                  " gory details.");
1035
1036 module_param_named(trydefaults, si_trydefaults, bool, 0);
1037 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1038                  " default scan of the KCS and SMIC interface at the standard"
1039                  " address");
1040 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1041 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1042                  " interface separated by commas.  The types are 'kcs',"
1043                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1044                  " the first interface to kcs and the second to bt");
1045 module_param_array(addrs, long, &num_addrs, 0);
1046 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1047                  " addresses separated by commas.  Only use if an interface"
1048                  " is in memory.  Otherwise, set it to zero or leave"
1049                  " it blank.");
1050 module_param_array(ports, int, &num_ports, 0);
1051 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1052                  " addresses separated by commas.  Only use if an interface"
1053                  " is a port.  Otherwise, set it to zero or leave"
1054                  " it blank.");
1055 module_param_array(irqs, int, &num_irqs, 0);
1056 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1057                  " addresses separated by commas.  Only use if an interface"
1058                  " has an interrupt.  Otherwise, set it to zero or leave"
1059                  " it blank.");
1060 module_param_array(regspacings, int, &num_regspacings, 0);
1061 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1062                  " and each successive register used by the interface.  For"
1063                  " instance, if the start address is 0xca2 and the spacing"
1064                  " is 2, then the second address is at 0xca4.  Defaults"
1065                  " to 1.");
1066 module_param_array(regsizes, int, &num_regsizes, 0);
1067 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1068                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1069                  " 16-bit, 32-bit, or 64-bit register.  Use this if the"
1070                  " 8-bit IPMI register has to be read from a larger"
1071                  " register.");
1072 module_param_array(regshifts, int, &num_regshifts, 0);
1073 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1074                  " IPMI register, in bits.  For instance, if the data"
1075                  " is read from a 32-bit word and the IPMI data is in"
1076                  " bits 8-15, then the shift would be 8."
1077 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1078 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1079                  " the controller.  Normally this is 0x20, but can be"
1080                  " overridden by this parm.  This is an array indexed"
1081                  " by interface number.");
1082 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1083 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1084                  " disabled (0).  Normally the IPMI driver auto-detects"
1085                  " this, but the value may be overridden by this parm.");
1086 module_param(unload_when_empty, int, 0);
1087 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1088                  " specified or found, default is 1.  Setting to 0"
1089                  " is useful for hot add of devices using hotmod.");
1090
1091
1092 static void std_irq_cleanup(struct smi_info *info)
1093 {
1094         if (info->si_type == SI_BT)
1095                 /* Disable the interrupt in the BT interface. */
1096                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1097         free_irq(info->irq, info);
1098 }
1099
1100 static int std_irq_setup(struct smi_info *info)
1101 {
1102         int rv;
1103
1104         if (!info->irq)
1105                 return 0;
1106
1107         if (info->si_type == SI_BT) {
1108                 rv = request_irq(info->irq,
1109                                  si_bt_irq_handler,
1110                                  IRQF_DISABLED,
1111                                  DEVICE_NAME,
1112                                  info);
1113                 if (!rv)
1114                         /* Enable the interrupt in the BT interface. */
1115                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1116                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1117         } else
1118                 rv = request_irq(info->irq,
1119                                  si_irq_handler,
1120                                  IRQF_DISABLED,
1121                                  DEVICE_NAME,
1122                                  info);
1123         if (rv) {
1124                 printk(KERN_WARNING
1125                        "ipmi_si: %s unable to claim interrupt %d,"
1126                        " running polled\n",
1127                        DEVICE_NAME, info->irq);
1128                 info->irq = 0;
1129         } else {
1130                 info->irq_cleanup = std_irq_cleanup;
1131                 printk("  Using irq %d\n", info->irq);
1132         }
1133
1134         return rv;
1135 }
1136
1137 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1138 {
1139         unsigned int addr = io->addr_data;
1140
1141         return inb(addr + (offset * io->regspacing));
1142 }
1143
1144 static void port_outb(struct si_sm_io *io, unsigned int offset,
1145                       unsigned char b)
1146 {
1147         unsigned int addr = io->addr_data;
1148
1149         outb(b, addr + (offset * io->regspacing));
1150 }
1151
1152 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1153 {
1154         unsigned int addr = io->addr_data;
1155
1156         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1157 }
1158
1159 static void port_outw(struct si_sm_io *io, unsigned int offset,
1160                       unsigned char b)
1161 {
1162         unsigned int addr = io->addr_data;
1163
1164         outw(b << io->regshift, addr + (offset * io->regspacing));
1165 }
1166
1167 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1168 {
1169         unsigned int addr = io->addr_data;
1170
1171         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1172 }
1173
1174 static void port_outl(struct si_sm_io *io, unsigned int offset,
1175                       unsigned char b)
1176 {
1177         unsigned int addr = io->addr_data;
1178
1179         outl(b << io->regshift, addr+(offset * io->regspacing));
1180 }
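/*
 * Editorial note (illustrative values, not from the original source):
 * with addr_data=0xca2, regspacing=4 and regsize=2, register 1 is read
 * with inw(0xca6); with regshift=8 the 8-bit IPMI value is taken from
 * bits 8..15 of that 16-bit read.
 */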
1181
1182 static void port_cleanup(struct smi_info *info)
1183 {
1184         unsigned int addr = info->io.addr_data;
1185         int          idx;
1186
1187         if (addr) {
1188                 for (idx = 0; idx < info->io_size; idx++) {
1189                         release_region(addr + idx * info->io.regspacing,
1190                                        info->io.regsize);
1191                 }
1192         }
1193 }
1194
1195 static int port_setup(struct smi_info *info)
1196 {
1197         unsigned int addr = info->io.addr_data;
1198         int          idx;
1199
1200         if (!addr)
1201                 return -ENODEV;
1202
1203         info->io_cleanup = port_cleanup;
1204
1205         /* Figure out the actual inb/inw/inl/etc routine to use based
1206            upon the register size. */
1207         switch (info->io.regsize) {
1208         case 1:
1209                 info->io.inputb = port_inb;
1210                 info->io.outputb = port_outb;
1211                 break;
1212         case 2:
1213                 info->io.inputb = port_inw;
1214                 info->io.outputb = port_outw;
1215                 break;
1216         case 4:
1217                 info->io.inputb = port_inl;
1218                 info->io.outputb = port_outl;
1219                 break;
1220         default:
1221                 printk("ipmi_si: Invalid register size: %d\n",
1222                        info->io.regsize);
1223                 return -EINVAL;
1224         }
1225
1226         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1227          * tables.  This causes problems when trying to register the
1228          * entire I/O region.  Therefore we must register each I/O
1229          * port separately.
1230          */
1231         for (idx = 0; idx < info->io_size; idx++) {
1232                 if (request_region(addr + idx * info->io.regspacing,
1233                                    info->io.regsize, DEVICE_NAME) == NULL) {
1234                         /* Undo allocations */
1235                         while (idx--) {
1236                                 release_region(addr + idx * info->io.regspacing,
1237                                                info->io.regsize);
1238                         }
1239                         return -EIO;
1240                 }
1241         }
1242         return 0;
1243 }
1244
1245 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1246 {
1247         return readb((io->addr)+(offset * io->regspacing));
1248 }
1249
1250 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1251                      unsigned char b)
1252 {
1253         writeb(b, (io->addr)+(offset * io->regspacing));
1254 }
1255
1256 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1257 {
1258         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1259                 & 0xff;
1260 }
1261
1262 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1263                      unsigned char b)
1264 {
1265         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1266 }
1267
1268 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1269 {
1270         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1271                 & 0xff;
1272 }
1273
1274 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1275                      unsigned char b)
1276 {
1277         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1278 }
1279
1280 #ifdef readq
1281 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1282 {
1283         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1284                 & 0xff;
1285 }
1286
1287 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1288                      unsigned char b)
1289 {
1290         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1291 }
1292 #endif
1293
1294 static void mem_cleanup(struct smi_info *info)
1295 {
1296         unsigned long addr = info->io.addr_data;
1297         int           mapsize;
1298
1299         if (info->io.addr) {
1300                 iounmap(info->io.addr);
1301
1302                 mapsize = ((info->io_size * info->io.regspacing)
1303                            - (info->io.regspacing - info->io.regsize));
1304
1305                 release_mem_region(addr, mapsize);
1306         }
1307 }
1308
1309 static int mem_setup(struct smi_info *info)
1310 {
1311         unsigned long addr = info->io.addr_data;
1312         int           mapsize;
1313
1314         if (!addr)
1315                 return -ENODEV;
1316
1317         info->io_cleanup = mem_cleanup;
1318
1319         /* Figure out the actual readb/readw/readl/etc routine to use based
1320            upon the register size. */
1321         switch (info->io.regsize) {
1322         case 1:
1323                 info->io.inputb = intf_mem_inb;
1324                 info->io.outputb = intf_mem_outb;
1325                 break;
1326         case 2:
1327                 info->io.inputb = intf_mem_inw;
1328                 info->io.outputb = intf_mem_outw;
1329                 break;
1330         case 4:
1331                 info->io.inputb = intf_mem_inl;
1332                 info->io.outputb = intf_mem_outl;
1333                 break;
1334 #ifdef readq
1335         case 8:
1336                 info->io.inputb = mem_inq;
1337                 info->io.outputb = mem_outq;
1338                 break;
1339 #endif
1340         default:
1341                 printk("ipmi_si: Invalid register size: %d\n",
1342                        info->io.regsize);
1343                 return -EINVAL;
1344         }
1345
1346         /* Calculate the total amount of memory to claim.  This is an
1347          * unusual looking calculation, but it avoids claiming any
1348          * more memory than it has to.  It will claim everything
1349          * from the first address to the end of the last full
1350          * register. */
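        /* For example (illustrative numbers): io_size=3, regspacing=4
           and regsize=1 give 3*4 - (4-1) = 9 bytes, covering everything
           from the first register through the end of the last one. */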
1351         mapsize = ((info->io_size * info->io.regspacing)
1352                    - (info->io.regspacing - info->io.regsize));
1353
1354         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1355                 return -EIO;
1356
1357         info->io.addr = ioremap(addr, mapsize);
1358         if (info->io.addr == NULL) {
1359                 release_mem_region(addr, mapsize);
1360                 return -EIO;
1361         }
1362         return 0;
1363 }
1364
1365 /*
1366  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1367  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1368  * Options are:
1369  *   rsp=<regspacing>
1370  *   rsi=<regsize>
1371  *   rsh=<regshift>
1372  *   irq=<irq>
1373  *   ipmb=<ipmb addr>
1374  */
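/*
 * Example usage (editorial, assuming the module is loaded as ipmi_si and
 * a KCS interface really lives at I/O port 0xca2):
 *
 *   echo "add,kcs,i/o,0xca2,rsp=1,irq=0" > /sys/module/ipmi_si/parameters/hotmod
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */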
1375 enum hotmod_op { HM_ADD, HM_REMOVE };
1376 struct hotmod_vals {
1377         char *name;
1378         int  val;
1379 };
1380 static struct hotmod_vals hotmod_ops[] = {
1381         { "add",        HM_ADD },
1382         { "remove",     HM_REMOVE },
1383         { NULL }
1384 };
1385 static struct hotmod_vals hotmod_si[] = {
1386         { "kcs",        SI_KCS },
1387         { "smic",       SI_SMIC },
1388         { "bt",         SI_BT },
1389         { NULL }
1390 };
1391 static struct hotmod_vals hotmod_as[] = {
1392         { "mem",        IPMI_MEM_ADDR_SPACE },
1393         { "i/o",        IPMI_IO_ADDR_SPACE },
1394         { NULL }
1395 };
1396 static int ipmi_strcasecmp(const char *s1, const char *s2)
1397 {
1398         while (*s1 || *s2) {
1399                 if (!*s1)
1400                         return -1;
1401                 if (!*s2)
1402                         return 1;
1403                 if (tolower(*s1) != tolower(*s2))
1404                         return tolower(*s1) - tolower(*s2);
1405                 s1++;
1406                 s2++;
1407         }
1408         return 0;
1409 }
1410 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1411 {
1412         char *s;
1413         int  i;
1414
1415         s = strchr(*curr, ',');
1416         if (!s) {
1417                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1418                 return -EINVAL;
1419         }
1420         *s = '\0';
1421         s++;
1422         for (i = 0; v[i].name; i++) {
1423                 if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
1424                         *val = v[i].val;
1425                         *curr = s;
1426                         return 0;
1427                 }
1428         }
1429
1430         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1431         return -EINVAL;
1432 }
1433
1434 static int hotmod_handler(const char *val, struct kernel_param *kp)
1435 {
1436         char *str = kstrdup(val, GFP_KERNEL);
1437         int  rv = -EINVAL;
1438         char *next, *curr, *s, *n, *o;
1439         enum hotmod_op op;
1440         enum si_type si_type;
1441         int  addr_space;
1442         unsigned long addr;
1443         int regspacing;
1444         int regsize;
1445         int regshift;
1446         int irq;
1447         int ipmb;
1448         int ival;
1449         struct smi_info *info;
1450
1451         if (!str)
1452                 return -ENOMEM;
1453
1454         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1455         ival = strlen(str) - 1;
1456         while ((ival >= 0) && isspace(str[ival])) {
1457                 str[ival] = '\0';
1458                 ival--;
1459         }
1460
1461         for (curr = str; curr; curr = next) {
1462                 regspacing = 1;
1463                 regsize = 1;
1464                 regshift = 0;
1465                 irq = 0;
1466                 ipmb = 0x20;
1467
1468                 next = strchr(curr, ':');
1469                 if (next) {
1470                         *next = '\0';
1471                         next++;
1472                 }
1473
1474                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1475                 if (rv)
1476                         break;
1477                 op = ival;
1478
1479                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1480                 if (rv)
1481                         break;
1482                 si_type = ival;
1483
1484                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1485                 if (rv)
1486                         break;
1487
1488                 s = strchr(curr, ',');
1489                 if (s) {
1490                         *s = '\0';
1491                         s++;
1492                 }
1493                 addr = simple_strtoul(curr, &n, 0);
1494                 if ((*n != '\0') || (*curr == '\0')) {
1495                         printk(KERN_WARNING PFX "Invalid hotmod address"
1496                                " '%s'\n", curr);
1497                         break;
1498                 }
1499
1500                 while (s) {
1501                         curr = s;
1502                         s = strchr(curr, ',');
1503                         if (s) {
1504                                 *s = '\0';
1505                                 s++;
1506                         }
1507                         o = strchr(curr, '=');
1508                         if (o) {
1509                                 *o = '\0';
1510                                 o++;
1511                         }
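                        /*
                         * HOTMOD_INT_OPT(name, val): if the option just split
                         * off of the string is "name", parse its "=<value>"
                         * part as an unsigned integer into val, bailing out
                         * on a missing or malformed value.
                         */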
1512 #define HOTMOD_INT_OPT(name, val) \
1513                         if (ipmi_strcasecmp(curr, name) == 0) {         \
1514                                 if (!o) {                               \
1515                                         printk(KERN_WARNING PFX         \
1516                                                "No option given for '%s'\n", \
1517                                                 curr);                  \
1518                                         goto out;                       \
1519                                 }                                       \
1520                                 val = simple_strtoul(o, &n, 0);         \
1521                                 if ((*n != '\0') || (*o == '\0')) {     \
1522                                         printk(KERN_WARNING PFX         \
1523                                                "Bad option given for '%s'\n", \
1524                                                curr);                   \
1525                                         goto out;                       \
1526                                 }                                       \
1527                         }
1528
1529                         HOTMOD_INT_OPT("rsp", regspacing)
1530                         else HOTMOD_INT_OPT("rsi", regsize)
1531                         else HOTMOD_INT_OPT("rsh", regshift)
1532                         else HOTMOD_INT_OPT("irq", irq)
1533                         else HOTMOD_INT_OPT("ipmb", ipmb)
1534                         else {
1535                                 printk(KERN_WARNING PFX
1536                                        "Invalid hotmod option '%s'\n",
1537                                        curr);
1538                                 goto out;
1539                         }
1540 #undef HOTMOD_INT_OPT
1541                 }
1542
1543                 if (op == HM_ADD) {
1544                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1545                         if (!info) {
1546                                 rv = -ENOMEM;
1547                                 goto out;
1548                         }
1549
1550                         info->addr_source = "hotmod";
1551                         info->si_type = si_type;
1552                         info->io.addr_data = addr;
1553                         info->io.addr_type = addr_space;
1554                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1555                                 info->io_setup = mem_setup;
1556                         else
1557                                 info->io_setup = port_setup;
1558
1559                         info->io.addr = NULL;
1560                         info->io.regspacing = regspacing;
1561                         if (!info->io.regspacing)
1562                                 info->io.regspacing = DEFAULT_REGSPACING;
1563                         info->io.regsize = regsize;
1564                         if (!info->io.regsize)
1565                                 info->io.regsize = DEFAULT_REGSPACING;
1566                         info->io.regshift = regshift;
1567                         info->irq = irq;
1568                         if (info->irq)
1569                                 info->irq_setup = std_irq_setup;
1570                         info->slave_addr = ipmb;
1571
1572                         try_smi_init(info);
1573                 } else {
1574                         /* remove */
1575                         struct smi_info *e, *tmp_e;
1576
1577                         mutex_lock(&smi_infos_lock);
1578                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1579                                 if (e->io.addr_type != addr_space)
1580                                         continue;
1581                                 if (e->si_type != si_type)
1582                                         continue;
1583                                 if (e->io.addr_data == addr)
1584                                         cleanup_one_si(e);
1585                         }
1586                         mutex_unlock(&smi_infos_lock);
1587                 }
1588         }
1589  out:
1590         kfree(str);
1591         return rv;
1592 }
1593
1594 static __devinit void hardcode_find_bmc(void)
1595 {
1596         int             i;
1597         struct smi_info *info;
1598
1599         for (i = 0; i < SI_MAX_PARMS; i++) {
1600                 if (!ports[i] && !addrs[i])
1601                         continue;
1602
1603                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1604                 if (!info)
1605                         return;
1606
1607                 info->addr_source = "hardcoded";
1608
1609                 if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
1610                         info->si_type = SI_KCS;
1611                 } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
1612                         info->si_type = SI_SMIC;
1613                 } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
1614                         info->si_type = SI_BT;
1615                 } else {
1616                         printk(KERN_WARNING
1617                                "ipmi_si: Interface type specified "
1618                                "for interface %d, was invalid: %s\n",
1619                                i, si_type[i]);
1620                         kfree(info);
1621                         continue;
1622                 }
1623
1624                 if (ports[i]) {
1625                         /* An I/O port */
1626                         info->io_setup = port_setup;
1627                         info->io.addr_data = ports[i];
1628                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1629                 } else if (addrs[i]) {
1630                         /* A memory port */
1631                         info->io_setup = mem_setup;
1632                         info->io.addr_data = addrs[i];
1633                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1634                 } else {
1635                         printk(KERN_WARNING
1636                                "ipmi_si: Interface type specified "
1637                                "for interface %d, "
1638                                "but port and address were not set or "
1639                                "set to zero.\n", i);
1640                         kfree(info);
1641                         continue;
1642                 }
1643
1644                 info->io.addr = NULL;
1645                 info->io.regspacing = regspacings[i];
1646                 if (!info->io.regspacing)
1647                         info->io.regspacing = DEFAULT_REGSPACING;
1648                 info->io.regsize = regsizes[i];
1649                 if (!info->io.regsize)
1650                         info->io.regsize = DEFAULT_REGSPACING;
1651                 info->io.regshift = regshifts[i];
1652                 info->irq = irqs[i];
1653                 if (info->irq)
1654                         info->irq_setup = std_irq_setup;
1655
1656                 try_smi_init(info);
1657         }
1658 }
1659
1660 #ifdef CONFIG_ACPI
1661
1662 #include <linux/acpi.h>
1663
1664 /* Once we get an ACPI failure, we don't try any more, because we go
1665    through the tables sequentially.  Once we don't find a table, there
1666    are no more. */
1667 static int acpi_failure = 0;
1668
1669 /* For GPE-type interrupts. */
1670 static u32 ipmi_acpi_gpe(void *context)
1671 {
1672         struct smi_info *smi_info = context;
1673         unsigned long   flags;
1674 #ifdef DEBUG_TIMING
1675         struct timeval t;
1676 #endif
1677
1678         spin_lock_irqsave(&(smi_info->si_lock), flags);
1679
1680         spin_lock(&smi_info->count_lock);
1681         smi_info->interrupts++;
1682         spin_unlock(&smi_info->count_lock);
1683
1684         if (atomic_read(&smi_info->stop_operation))
1685                 goto out;
1686
1687 #ifdef DEBUG_TIMING
1688         do_gettimeofday(&t);
1689         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1690 #endif
1691         smi_event_handler(smi_info, 0);
1692  out:
1693         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1694
1695         return ACPI_INTERRUPT_HANDLED;
1696 }
1697
1698 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1699 {
1700         if (!info->irq)
1701                 return;
1702
1703         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1704 }
1705
1706 static int acpi_gpe_irq_setup(struct smi_info *info)
1707 {
1708         acpi_status status;
1709
1710         if (!info->irq)
1711                 return 0;
1712
1713         /* FIXME - is level triggered right? */
1714         status = acpi_install_gpe_handler(NULL,
1715                                           info->irq,
1716                                           ACPI_GPE_LEVEL_TRIGGERED,
1717                                           &ipmi_acpi_gpe,
1718                                           info);
1719         if (status != AE_OK) {
1720                 printk(KERN_WARNING
1721                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1722                        " running polled\n",
1723                        DEVICE_NAME, info->irq);
1724                 info->irq = 0;
1725                 return -EINVAL;
1726         } else {
1727                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1728                 printk("  Using ACPI GPE %d\n", info->irq);
1729                 return 0;
1730         }
1731 }
1732
1733 /*
1734  * Defined at
1735  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1736  */
1737 struct SPMITable {
1738         s8      Signature[4];
1739         u32     Length;
1740         u8      Revision;
1741         u8      Checksum;
1742         s8      OEMID[6];
1743         s8      OEMTableID[8];
1744         s8      OEMRevision[4];
1745         s8      CreatorID[4];
1746         s8      CreatorRevision[4];
1747         u8      InterfaceType;
1748         u8      IPMIlegacy;
1749         s16     SpecificationRevision;
1750
1751         /*
1752          * Bit 0 - SCI interrupt supported
1753          * Bit 1 - I/O APIC/SAPIC
1754          */
1755         u8      InterruptType;
1756
1757         /* If bit 0 of InterruptType is set, then this is the SCI
1758            interrupt in the GPEx_STS register. */
1759         u8      GPE;
1760
1761         s16     Reserved;
1762
1763         /* If bit 1 of InterruptType is set, then this is the I/O
1764            APIC/SAPIC interrupt. */
1765         u32     GlobalSystemInterrupt;
1766
1767         /* The actual register address. */
1768         struct acpi_generic_address addr;
1769
1770         u8      UID[4];
1771
1772         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1773 };
1774
1775 static __devinit int try_init_acpi(struct SPMITable *spmi)
1776 {
1777         struct smi_info  *info;
1778         char             *io_type;
1779         u8               addr_space;
1780
1781         if (spmi->IPMIlegacy != 1) {
1782             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1783             return -ENODEV;
1784         }
1785
1786         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1787                 addr_space = IPMI_MEM_ADDR_SPACE;
1788         else
1789                 addr_space = IPMI_IO_ADDR_SPACE;
1790
1791         info = kzalloc(sizeof(*info), GFP_KERNEL);
1792         if (!info) {
1793                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1794                 return -ENOMEM;
1795         }
1796
1797         info->addr_source = "ACPI";
1798
1799         /* Figure out the interface type. */
1800         switch (spmi->InterfaceType)
1801         {
1802         case 1: /* KCS */
1803                 info->si_type = SI_KCS;
1804                 break;
1805         case 2: /* SMIC */
1806                 info->si_type = SI_SMIC;
1807                 break;
1808         case 3: /* BT */
1809                 info->si_type = SI_BT;
1810                 break;
1811         default:
1812                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1813                         spmi->InterfaceType);
1814                 kfree(info);
1815                 return -EIO;
1816         }
1817
1818         if (spmi->InterruptType & 1) {
1819                 /* We've got a GPE interrupt. */
1820                 info->irq = spmi->GPE;
1821                 info->irq_setup = acpi_gpe_irq_setup;
1822         } else if (spmi->InterruptType & 2) {
1823                 /* We've got an APIC/SAPIC interrupt. */
1824                 info->irq = spmi->GlobalSystemInterrupt;
1825                 info->irq_setup = std_irq_setup;
1826         } else {
1827                 /* Use the default interrupt setting. */
1828                 info->irq = 0;
1829                 info->irq_setup = NULL;
1830         }
1831
1832         if (spmi->addr.register_bit_width) {
1833                 /* A (hopefully) properly formed register bit width. */
1834                 info->io.regspacing = spmi->addr.register_bit_width / 8;
1835         } else {
1836                 info->io.regspacing = DEFAULT_REGSPACING;
1837         }
1838         info->io.regsize = info->io.regspacing;
1839         info->io.regshift = spmi->addr.register_bit_offset;
1840
1841         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1842                 io_type = "memory";
1843                 info->io_setup = mem_setup;
1844                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1845         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1846                 io_type = "I/O";
1847                 info->io_setup = port_setup;
1848                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1849         } else {
1850                 kfree(info);
1851                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1852                 return -EIO;
1853         }
1854         info->io.addr_data = spmi->addr.address;
1855
1856         try_smi_init(info);
1857
1858         return 0;
1859 }
1860
1861 static __devinit void acpi_find_bmc(void)
1862 {
1863         acpi_status      status;
1864         struct SPMITable *spmi;
1865         int              i;
1866
1867         if (acpi_disabled)
1868                 return;
1869
1870         if (acpi_failure)
1871                 return;
1872
1873         for (i = 0; ; i++) {
1874                 status = acpi_get_firmware_table("SPMI", i+1,
1875                                                  ACPI_LOGICAL_ADDRESSING,
1876                                                  (struct acpi_table_header **)
1877                                                  &spmi);
1878                 if (status != AE_OK)
1879                         return;
1880
1881                 try_init_acpi(spmi);
1882         }
1883 }
1884 #endif
1885
1886 #ifdef CONFIG_DMI
1887 struct dmi_ipmi_data
1888 {
1889         u8              type;
1890         u8              addr_space;
1891         unsigned long   base_addr;
1892         u8              irq;
1893         u8              offset;
1894         u8              slave_addr;
1895 };
1896
1897 static int __devinit decode_dmi(struct dmi_header *dm,
1898                                 struct dmi_ipmi_data *dmi)
1899 {
1900         u8              *data = (u8 *)dm;
1901         unsigned long   base_addr;
1902         u8              reg_spacing;
1903         u8              len = dm->length;
1904
1905         dmi->type = data[4];
1906
1907         memcpy(&base_addr, data+8, sizeof(unsigned long));
1908         if (len >= 0x11) {
1909                 if (base_addr & 1) {
1910                         /* I/O */
1911                         base_addr &= 0xFFFE;
1912                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1913                 }
1914                 else {
1915                         /* Memory */
1916                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1917                 }
1918                 /* If bit 4 of byte 0x10 is set, then the least significant
1919                    bit of the address is 1 (i.e. the address is odd). */
1920                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
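                /* For example (illustrative): a raw field of 0x0ca3 has bit 0
                   set, so this is I/O space; it was masked to 0x0ca2 above and
                   bit 4 of byte 0x10 (the real low address bit) is OR-ed back
                   in, giving 0x0ca2 or 0x0ca3. */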
1921
1922                 dmi->irq = data[0x11];
1923
1924                 /* The top two bits of byte 0x10 hold the register spacing. */
1925                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1926                 switch(reg_spacing){
1927                 case 0x00: /* Byte boundaries */
1928                     dmi->offset = 1;
1929                     break;
1930                 case 0x01: /* 32-bit boundaries */
1931                     dmi->offset = 4;
1932                     break;
1933                 case 0x02: /* 16-byte boundaries */
1934                     dmi->offset = 16;
1935                     break;
1936                 default:
1937                     /* Some other interface, just ignore it. */
1938                     return -EIO;
1939                 }
1940         } else {
1941                 /* Old DMI spec. */
1942                 /* Note that technically, the lower bit of the base
1943                  * address should be 1 if the address is I/O and 0 if
1944                  * the address is in memory.  So many systems get that
1945                  * wrong (and all that I have seen are I/O) so we just
1946                  * ignore that bit and assume I/O.  Systems that use
1947                  * memory should use the newer spec, anyway. */
1948                 dmi->base_addr = base_addr & 0xfffe;
1949                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1950                 dmi->offset = 1;
1951         }
1952
1953         dmi->slave_addr = data[6];
1954
1955         return 0;
1956 }
1957
1958 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1959 {
1960         struct smi_info *info;
1961
1962         info = kzalloc(sizeof(*info), GFP_KERNEL);
1963         if (!info) {
1964                 printk(KERN_ERR
1965                        "ipmi_si: Could not allocate SI data\n");
1966                 return;
1967         }
1968
1969         info->addr_source = "SMBIOS";
1970
1971         switch (ipmi_data->type) {
1972         case 0x01: /* KCS */
1973                 info->si_type = SI_KCS;
1974                 break;
1975         case 0x02: /* SMIC */
1976                 info->si_type = SI_SMIC;
1977                 break;
1978         case 0x03: /* BT */
1979                 info->si_type = SI_BT;
1980                 break;
1981         default:
1982                 return;
1983         }
1984
1985         switch (ipmi_data->addr_space) {
1986         case IPMI_MEM_ADDR_SPACE:
1987                 info->io_setup = mem_setup;
1988                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1989                 break;
1990
1991         case IPMI_IO_ADDR_SPACE:
1992                 info->io_setup = port_setup;
1993                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1994                 break;
1995
1996         default:
1997                 kfree(info);
1998                 printk(KERN_WARNING
1999                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2000                        ipmi_data->addr_space);
2001                 return;
2002         }
2003         info->io.addr_data = ipmi_data->base_addr;
2004
2005         info->io.regspacing = ipmi_data->offset;
2006         if (!info->io.regspacing)
2007                 info->io.regspacing = DEFAULT_REGSPACING;
2008         info->io.regsize = DEFAULT_REGSPACING;
2009         info->io.regshift = 0;
2010
2011         info->slave_addr = ipmi_data->slave_addr;
2012
2013         info->irq = ipmi_data->irq;
2014         if (info->irq)
2015                 info->irq_setup = std_irq_setup;
2016
2017         try_smi_init(info);
2018 }
2019
2020 static void __devinit dmi_find_bmc(void)
2021 {
2022         struct dmi_device    *dev = NULL;
2023         struct dmi_ipmi_data data;
2024         int                  rv;
2025
2026         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2027                 memset(&data, 0, sizeof(data));
2028                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2029                 if (!rv)
2030                         try_init_dmi(&data);
2031         }
2032 }
2033 #endif /* CONFIG_DMI */
2034
2035 #ifdef CONFIG_PCI
2036
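/* PCI class code 0x0c07 is "IPMI system interface"; its low byte (the
   programming interface) selects the register style, matching the
   PCI_ERMC_CLASSCODE_TYPE_* values below: 0 = SMIC, 1 = KCS, 2 = BT. */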
2037 #define PCI_ERMC_CLASSCODE              0x0C0700
2038 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2039 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2040 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2041 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2042 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2043
2044 #define PCI_HP_VENDOR_ID    0x103C
2045 #define PCI_MMC_DEVICE_ID   0x121A
2046 #define PCI_MMC_ADDR_CW     0x10
2047
2048 static void ipmi_pci_cleanup(struct smi_info *info)
2049 {
2050         struct pci_dev *pdev = info->addr_source_data;
2051
2052         pci_disable_device(pdev);
2053 }
2054
2055 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2056                                     const struct pci_device_id *ent)
2057 {
2058         int rv;
2059         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2060         struct smi_info *info;
2061         int first_reg_offset = 0;
2062
2063         info = kzalloc(sizeof(*info), GFP_KERNEL);
2064         if (!info)
2065                 return -ENOMEM;
2066
2067         info->addr_source = "PCI";
2068
2069         switch (class_type) {
2070         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2071                 info->si_type = SI_SMIC;
2072                 break;
2073
2074         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2075                 info->si_type = SI_KCS;
2076                 break;
2077
2078         case PCI_ERMC_CLASSCODE_TYPE_BT:
2079                 info->si_type = SI_BT;
2080                 break;
2081
2082         default:
2083                 kfree(info);
2084                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2085                        pci_name(pdev), class_type);
2086                 return -ENOMEM;
2087         }
2088
2089         rv = pci_enable_device(pdev);
2090         if (rv) {
2091                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2092                        pci_name(pdev));
2093                 kfree(info);
2094                 return rv;
2095         }
2096
2097         info->addr_source_cleanup = ipmi_pci_cleanup;
2098         info->addr_source_data = pdev;
2099
2100         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2101                 first_reg_offset = 1;
2102
2103         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2104                 info->io_setup = port_setup;
2105                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2106         } else {
2107                 info->io_setup = mem_setup;
2108                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2109         }
2110         info->io.addr_data = pci_resource_start(pdev, 0);
2111
2112         info->io.regspacing = DEFAULT_REGSPACING;
2113         info->io.regsize = DEFAULT_REGSPACING;
2114         info->io.regshift = 0;
2115
2116         info->irq = pdev->irq;
2117         if (info->irq)
2118                 info->irq_setup = std_irq_setup;
2119
2120         info->dev = &pdev->dev;
2121
2122         return try_smi_init(info);
2123 }
2124
2125 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2126 {
2127 }
2128
2129 #ifdef CONFIG_PM
2130 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2131 {
2132         return 0;
2133 }
2134
2135 static int ipmi_pci_resume(struct pci_dev *pdev)
2136 {
2137         return 0;
2138 }
2139 #endif
2140
2141 static struct pci_device_id ipmi_pci_devices[] = {
2142         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2143         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
             { 0, }  /* terminating entry required by the PCI core */
2144 };
2145 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2146
2147 static struct pci_driver ipmi_pci_driver = {
2148         .name =         DEVICE_NAME,
2149         .id_table =     ipmi_pci_devices,
2150         .probe =        ipmi_pci_probe,
2151         .remove =       __devexit_p(ipmi_pci_remove),
2152 #ifdef CONFIG_PM
2153         .suspend =      ipmi_pci_suspend,
2154         .resume =       ipmi_pci_resume,
2155 #endif
2156 };
2157 #endif /* CONFIG_PCI */
2158
2159
2160 static int try_get_dev_id(struct smi_info *smi_info)
2161 {
2162         unsigned char         msg[2];
2163         unsigned char         *resp;
2164         unsigned long         resp_len;
2165         enum si_sm_result     smi_result;
2166         int                   rv = 0;
2167
2168         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2169         if (!resp)
2170                 return -ENOMEM;
2171
2172         /* Do a Get Device ID command, since it comes back with some
2173            useful info. */
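        /* msg[0] carries the netfn in bits 7..2 (LUN 0 in bits 1..0) and
           msg[1] the command; Get Device ID takes no request data. */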
2174         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2175         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2176         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2177
2178         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2179         for (;;)
2180         {
2181                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2182                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2183                         schedule_timeout_uninterruptible(1);
2184                         smi_result = smi_info->handlers->event(
2185                                 smi_info->si_sm, 100);
2186                 }
2187                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2188                 {
2189                         smi_result = smi_info->handlers->event(
2190                                 smi_info->si_sm, 0);
2191                 }
2192                 else
2193                         break;
2194         }
2195         if (smi_result == SI_SM_HOSED) {
2196                 /* We couldn't get the state machine to run, so whatever's at
2197                    the port is probably not an IPMI SMI interface. */
2198                 rv = -ENODEV;
2199                 goto out;
2200         }
2201
2202         /* Otherwise, we got some data. */
2203         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2204                                                   resp, IPMI_MAX_MSG_LENGTH);
2205         if (resp_len < 14) {
2206                 /* That's odd, it should be longer. */
2207                 rv = -EINVAL;
2208                 goto out;
2209         }
2210
2211         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2212                 /* That's odd, it shouldn't be able to fail. */
2213                 rv = -EINVAL;
2214                 goto out;
2215         }
2216
2217         /* Record info from the get device id, in case we need it. */
2218         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2219
2220  out:
2221         kfree(resp);
2222         return rv;
2223 }
2224
2225 static int type_file_read_proc(char *page, char **start, off_t off,
2226                                int count, int *eof, void *data)
2227 {
2228         struct smi_info *smi = data;
2229
2230         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2231 }
2232
2233 static int stat_file_read_proc(char *page, char **start, off_t off,
2234                                int count, int *eof, void *data)
2235 {
2236         char            *out = (char *) page;
2237         struct smi_info *smi = data;
2238
2239         out += sprintf(out, "interrupts_enabled:    %d\n",
2240                        smi->irq && !smi->interrupt_disabled);
2241         out += sprintf(out, "short_timeouts:        %ld\n",
2242                        smi->short_timeouts);
2243         out += sprintf(out, "long_timeouts:         %ld\n",
2244                        smi->long_timeouts);
2245         out += sprintf(out, "timeout_restarts:      %ld\n",
2246                        smi->timeout_restarts);
2247         out += sprintf(out, "idles:                 %ld\n",
2248                        smi->idles);
2249         out += sprintf(out, "interrupts:            %ld\n",
2250                        smi->interrupts);
2251         out += sprintf(out, "attentions:            %ld\n",
2252                        smi->attentions);
2253         out += sprintf(out, "flag_fetches:          %ld\n",
2254                        smi->flag_fetches);
2255         out += sprintf(out, "hosed_count:           %ld\n",
2256                        smi->hosed_count);
2257         out += sprintf(out, "complete_transactions: %ld\n",
2258                        smi->complete_transactions);
2259         out += sprintf(out, "events:                %ld\n",
2260                        smi->events);
2261         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2262                        smi->watchdog_pretimeouts);
2263         out += sprintf(out, "incoming_messages:     %ld\n",
2264                        smi->incoming_messages);
2265
2266         return out - page;
2267 }
2268
2269 static int param_read_proc(char *page, char **start, off_t off,
2270                            int count, int *eof, void *data)
2271 {
2272         struct smi_info *smi = data;
2273
2274         return sprintf(page,
2275                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2276                        si_to_str[smi->si_type],
2277                        addr_space_to_str[smi->io.addr_type],
2278                        smi->io.addr_data,
2279                        smi->io.regspacing,
2280                        smi->io.regsize,
2281                        smi->io.regshift,
2282                        smi->irq,
2283                        smi->slave_addr);
2284 }
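/* The "params" file above yields one line in the same form the hotmod
 * parameter accepts, e.g. (illustrative; the interface and address-space
 * strings come from si_to_str/addr_space_to_str, and ipmb is printed in
 * decimal):
 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
 */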
2285
2286 /*
2287  * oem_data_avail_to_receive_msg_avail
2288  * @info - smi_info structure with msg_flags set
2289  *
2290  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2291  * Returns 1 indicating need to re-run handle_flags().
2292  */
2293 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2294 {
2295         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2296                                 RECEIVE_MSG_AVAIL);
2297         return 1;
2298 }
2299
2300 /*
2301  * setup_dell_poweredge_oem_data_handler
2302  * @info - smi_info.device_id must be populated
2303  *
2304  * Systems that match, but have firmware version < 1.40 may assert
2305  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2306  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2307  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2308  * as RECEIVE_MSG_AVAIL instead.
2309  *
2310  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2311  * assert the OEM[012] bits, and if it did, the driver would have to
2312  * change to handle that properly, we don't actually check for the
2313  * firmware version.
2314  * Device ID = 0x20                BMC on PowerEdge 8G servers
2315  * Device Revision = 0x80
2316  * Firmware Revision1 = 0x01       BMC version 1.40
2317  * Firmware Revision2 = 0x40       BCD encoded
2318  * IPMI Version = 0x51             IPMI 1.5
2319  * Manufacturer ID = A2 02 00      Dell IANA
2320  *
2321  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2322  * OEM0_DATA_AVAIL, which then needs to be treated as RECEIVE_MSG_AVAIL.
2323  *
2324  */
2325 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2326 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2327 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2328 #define DELL_IANA_MFR_ID 0x0002a2
2329 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2330 {
2331         struct ipmi_device_id *id = &smi_info->device_id;
2332         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2333                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2334                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2335                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2336                         smi_info->oem_data_avail_handler =
2337                                 oem_data_avail_to_receive_msg_avail;
2338                 }
2339                 else if (ipmi_version_major(id) < 1 ||
2340                          (ipmi_version_major(id) == 1 &&
2341                           ipmi_version_minor(id) < 5)) {
2342                         smi_info->oem_data_avail_handler =
2343                                 oem_data_avail_to_receive_msg_avail;
2344                 }
2345         }
2346 }
2347
2348 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2349 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2350 {
2351         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2352
2353         /* Make it a response (OR-ing in 4 sets the netfn's response bit). */
2354         msg->rsp[0] = msg->data[0] | 4;
2355         msg->rsp[1] = msg->data[1];
2356         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2357         msg->rsp_size = 3;
2358         smi_info->curr_msg = NULL;
2359         deliver_recv_msg(smi_info, msg);
2360 }
2361
2362 /*
2363  * dell_poweredge_bt_xaction_handler
2364  * @info - smi_info.device_id must be populated
2365  *
2366  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2367  * not respond to a Get SDR command if the length of the data
2368  * requested is exactly 0x3A, which leads to command timeouts and no
2369  * data returned.  This intercepts such commands, and causes userspace
2370  * callers to try again with a different-sized buffer, which succeeds.
2371  */
2372
2373 #define STORAGE_NETFN 0x0A
2374 #define STORAGE_CMD_GET_SDR 0x23
2375 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2376                                              unsigned long unused,
2377                                              void *in)
2378 {
2379         struct smi_info *smi_info = in;
2380         unsigned char *data = smi_info->curr_msg->data;
2381         unsigned int size   = smi_info->curr_msg->data_size;
2382         if (size >= 8 &&
2383             (data[0]>>2) == STORAGE_NETFN &&
2384             data[1] == STORAGE_CMD_GET_SDR &&
2385             data[7] == 0x3A) {
2386                 return_hosed_msg_badsize(smi_info);
2387                 return NOTIFY_STOP;
2388         }
2389         return NOTIFY_DONE;
2390 }
2391
2392 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2393         .notifier_call  = dell_poweredge_bt_xaction_handler,
2394 };
2395
2396 /*
2397  * setup_dell_poweredge_bt_xaction_handler
2398  * @info - smi_info.device_id must be filled in already
2399  *
2400  * Registers the Dell PowerEdge BT transaction notifier when we know
2401  * the BMC needs it.
2402  */
2403 static void
2404 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2405 {
2406         struct ipmi_device_id *id = &smi_info->device_id;
2407         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2408             smi_info->si_type == SI_BT)
2409                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2410 }
2411
2412 /*
2413  * setup_oem_data_handler
2414  * @info - smi_info.device_id must be filled in already
2415  *
2416  * Fills in smi_info.oem_data_avail_handler
2417  * when we know what function to use there.
2418  */
2419
2420 static void setup_oem_data_handler(struct smi_info *smi_info)
2421 {
2422         setup_dell_poweredge_oem_data_handler(smi_info);
2423 }
2424
2425 static void setup_xaction_handlers(struct smi_info *smi_info)
2426 {
2427         setup_dell_poweredge_bt_xaction_handler(smi_info);
2428 }
2429
2430 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2431 {
2432         if (smi_info->intf) {
2433                 /* The timer and thread are only running if the
2434                    interface has been started up and registered. */
2435                 if (smi_info->thread != NULL)
2436                         kthread_stop(smi_info->thread);
2437                 del_timer_sync(&smi_info->si_timer);
2438         }
2439 }
2440
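/* Commonly used legacy default addresses to probe when nothing else has
   registered an interface: KCS at I/O port 0xca2, SMIC at 0xca9 and BT
   at 0xe4, as listed in the table below. */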
2441 static __devinitdata struct ipmi_default_vals
2442 {
2443         int type;
2444         int port;
2445 } ipmi_defaults[] =
2446 {
2447         { .type = SI_KCS, .port = 0xca2 },
2448         { .type = SI_SMIC, .port = 0xca9 },
2449         { .type = SI_BT, .port = 0xe4 },
2450         { .port = 0 }
2451 };
2452
2453 static __devinit void default_find_bmc(void)
2454 {
2455         struct smi_info *info;
2456         int             i;
2457
2458         for (i = 0; ; i++) {
2459                 if (!ipmi_defaults[i].port)
2460                         break;
2461
2462                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2463                 if (!info)
2464                         return;
2465
2466                 info->addr_source = NULL;
2467
2468                 info->si_type = ipmi_defaults[i].type;
2469                 info->io_setup = port_setup;
2470                 info->io.addr_data = ipmi_defaults[i].port;
2471                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2472
2473                 info->io.addr = NULL;
2474                 info->io.regspacing = DEFAULT_REGSPACING;
2475                 info->io.regsize = DEFAULT_REGSPACING;
2476                 info->io.regshift = 0;
2477
2478                 if (try_smi_init(info) == 0) {
2479                         /* Found one... */
2480                         printk(KERN_INFO "ipmi_si: Found default %s state"
2481                                " machine at %s address 0x%lx\n",
2482                                si_to_str[info->si_type],
2483                                addr_space_to_str[info->io.addr_type],
2484                                info->io.addr_data);
2485                         return;
2486                 }
2487         }
2488 }
2489
2490 static int is_new_interface(struct smi_info *info)
2491 {
2492         struct smi_info *e;
2493
2494         list_for_each_entry(e, &smi_infos, link) {
2495                 if (e->io.addr_type != info->io.addr_type)
2496                         continue;
2497                 if (e->io.addr_data == info->io.addr_data)
2498                         return 0;
2499         }
2500
2501         return 1;
2502 }
2503
2504 static int try_smi_init(struct smi_info *new_smi)
2505 {
2506         int rv;
2507
2508         if (new_smi->addr_source) {
2509                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2510                        " machine at %s address 0x%lx, slave address 0x%x,"
2511                        " irq %d\n",
2512                        new_smi->addr_source,
2513                        si_to_str[new_smi->si_type],
2514                        addr_space_to_str[new_smi->io.addr_type],
2515                        new_smi->io.addr_data,
2516                        new_smi->slave_addr, new_smi->irq);
2517         }
2518
2519         mutex_lock(&smi_infos_lock);
2520         if (!is_new_interface(new_smi)) {
2521                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2522                 rv = -EBUSY;
2523                 goto out_err;
2524         }
2525
2526         /* So we know not to free it unless we have allocated one. */
2527         new_smi->intf = NULL;
2528         new_smi->si_sm = NULL;
2529         new_smi->handlers = NULL;
2530
2531         switch (new_smi->si_type) {
2532         case SI_KCS:
2533                 new_smi->handlers = &kcs_smi_handlers;
2534                 break;
2535
2536         case SI_SMIC:
2537                 new_smi->handlers = &smic_smi_handlers;
2538                 break;
2539
2540         case SI_BT:
2541                 new_smi->handlers = &bt_smi_handlers;
2542                 break;
2543
2544         default:
2545                 /* No support for anything else yet. */
2546                 rv = -EIO;
2547                 goto out_err;
2548         }
2549
2550         /* Allocate the state machine's data and initialize it. */
2551         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2552         if (!new_smi->si_sm) {
2553                 printk(" Could not allocate state machine memory\n");
2554                 rv = -ENOMEM;
2555                 goto out_err;
2556         }
2557         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2558                                                         &new_smi->io);
2559
2560         /* Now that we know the I/O size, we can set up the I/O. */
2561         rv = new_smi->io_setup(new_smi);
2562         if (rv) {
2563                 printk(" Could not set up I/O space\n");
2564                 goto out_err;
2565         }
2566
2567         spin_lock_init(&(new_smi->si_lock));
2568         spin_lock_init(&(new_smi->msg_lock));
2569         spin_lock_init(&(new_smi->count_lock));
2570
2571         /* Do low-level detection first. */
2572         if (new_smi->handlers->detect(new_smi->si_sm)) {
2573                 if (new_smi->addr_source)
2574                         printk(KERN_INFO "ipmi_si: Interface detection"
2575                                " failed\n");
2576                 rv = -ENODEV;
2577                 goto out_err;
2578         }
2579
2580         /* Attempt a get device id command.  If it fails, we probably
2581            don't have a BMC here. */
2582         rv = try_get_dev_id(new_smi);
2583         if (rv) {
2584                 if (new_smi->addr_source)
2585                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2586                                " at this location\n");
2587                 goto out_err;
2588         }
2589
2590         setup_oem_data_handler(new_smi);
2591         setup_xaction_handlers(new_smi);
2592
2593         /* Try to claim any interrupts. */
2594         if (new_smi->irq_setup)
2595                 new_smi->irq_setup(new_smi);
2596
2597         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2598         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2599         new_smi->curr_msg = NULL;
2600         atomic_set(&new_smi->req_events, 0);
2601         new_smi->run_to_completion = 0;
2602
2603         new_smi->interrupt_disabled = 0;
2604         atomic_set(&new_smi->stop_operation, 0);
2605         new_smi->intf_num = smi_num;
2606         smi_num++;
2607
2608         /* Start clearing the flags before we enable interrupts or the
2609            timer to avoid racing with the timer. */
2610         start_clear_flags(new_smi);
2611         /* IRQ is defined to be set when non-zero. */
2612         if (new_smi->irq)
2613                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2614
2615         if (!new_smi->dev) {
2616                 /* If we don't already have a device from something
2617                  * else (like PCI), then register a new one. */
2618                 new_smi->pdev = platform_device_alloc("ipmi_si",
2619                                                       new_smi->intf_num);
2620                 if (!new_smi->pdev) {
                        rv = -ENOMEM;
2621                         printk(KERN_ERR
2622                                "ipmi_si_intf:"
2623                                " Unable to allocate platform device\n");
2624                         goto out_err;
2625                 }
2626                 new_smi->dev = &new_smi->pdev->dev;
2627                 new_smi->dev->driver = &ipmi_driver;
2628
2629                 rv = platform_device_add(new_smi->pdev);
2630                 if (rv) {
2631                         printk(KERN_ERR
2632                                "ipmi_si_intf:"
2633                                " Unable to register system interface device:"
2634                                " %d\n",
2635                                rv);
2636                         goto out_err;
2637                 }
2638                 new_smi->dev_registered = 1;
2639         }
2640
2641         rv = ipmi_register_smi(&handlers,
2642                                new_smi,
2643                                &new_smi->device_id,
2644                                new_smi->dev,
2645                                "bmc",
2646                                new_smi->slave_addr);
2647         if (rv) {
2648                 printk(KERN_ERR
2649                        "ipmi_si: Unable to register device: error %d\n",
2650                        rv);
2651                 goto out_err_stop_timer;
2652         }
2653
2654         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2655                                      type_file_read_proc, NULL,
2656                                      new_smi, THIS_MODULE);
2657         if (rv) {
2658                 printk(KERN_ERR
2659                        "ipmi_si: Unable to create proc entry: %d\n",
2660                        rv);
2661                 goto out_err_stop_timer;
2662         }
2663
2664         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2665                                      stat_file_read_proc, NULL,
2666                                      new_smi, THIS_MODULE);
2667         if (rv) {
2668                 printk(KERN_ERR
2669                        "ipmi_si: Unable to create proc entry: %d\n",
2670                        rv);
2671                 goto out_err_stop_timer;
2672         }
2673
2674         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2675                                      param_read_proc, NULL,
2676                                      new_smi, THIS_MODULE);
2677         if (rv) {
2678                 printk(KERN_ERR
2679                        "ipmi_si: Unable to create proc entry: %d\n",
2680                        rv);
2681                 goto out_err_stop_timer;
2682         }
2683
2684         list_add_tail(&new_smi->link, &smi_infos);
2685
2686         mutex_unlock(&smi_infos_lock);
2687
2688         printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2689
2690         return 0;
2691
2692  out_err_stop_timer:
2693         atomic_inc(&new_smi->stop_operation);
2694         wait_for_timer_and_thread(new_smi);
2695
2696  out_err:
2697         if (new_smi->intf)
2698                 ipmi_unregister_smi(new_smi->intf);
2699
2700         if (new_smi->irq_cleanup)
2701                 new_smi->irq_cleanup(new_smi);
2702
2703         /* Wait until we know that any interrupt handlers that might
2704            have been running before we freed the interrupt have
2705            finished. */
2706         synchronize_sched();
2707
2708         if (new_smi->si_sm) {
2709                 if (new_smi->handlers)
2710                         new_smi->handlers->cleanup(new_smi->si_sm);
2711                 kfree(new_smi->si_sm);
2712         }
2713         if (new_smi->addr_source_cleanup)
2714                 new_smi->addr_source_cleanup(new_smi);
2715         if (new_smi->io_cleanup)
2716                 new_smi->io_cleanup(new_smi);
2717
2718         if (new_smi->dev_registered)
2719                 platform_device_unregister(new_smi->pdev);
2720
2721         kfree(new_smi);
2722
2723         mutex_unlock(&smi_infos_lock);
2724
2725         return rv;
2726 }
2727
2728 static __devinit int init_ipmi_si(void)
2729 {
2730         int  i;
2731         char *str;
2732         int  rv;
2733
2734         if (initialized)
2735                 return 0;
2736         initialized = 1;
2737
2738         /* Register the device drivers. */
2739         rv = driver_register(&ipmi_driver);
2740         if (rv) {
2741                 printk(KERN_ERR
2742                        "init_ipmi_si: Unable to register driver: %d\n",
2743                        rv);
2744                 return rv;
2745         }
2746
2747
2748         /* Parse out the si_type string into its components. */
2749         str = si_type_str;
2750         if (*str != '\0') {
2751                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2752                         si_type[i] = str;
2753                         str = strchr(str, ',');
2754                         if (str) {
2755                                 *str = '\0';
2756                                 str++;
2757                         } else {
2758                                 break;
2759                         }
2760                 }
2761         }
2762
2763         printk(KERN_INFO "IPMI System Interface driver.\n");
2764
2765         hardcode_find_bmc();
2766
2767 #ifdef CONFIG_DMI
2768         dmi_find_bmc();
2769 #endif
2770
2771 #ifdef CONFIG_ACPI
2772         if (si_trydefaults)
2773                 acpi_find_bmc();
2774 #endif
2775
2776 #ifdef CONFIG_PCI
2777         pci_module_init(&ipmi_pci_driver);
2778 #endif
2779
2780         if (si_trydefaults) {
2781                 mutex_lock(&smi_infos_lock);
2782                 if (list_empty(&smi_infos)) {
2783                         /* No BMC was found, try defaults. */
2784                         mutex_unlock(&smi_infos_lock);
2785                         default_find_bmc();
2786                 } else {
2787                         mutex_unlock(&smi_infos_lock);
2788                 }
2789         }
2790
2791         mutex_lock(&smi_infos_lock);
2792         if (unload_when_empty && list_empty(&smi_infos)) {
2793                 mutex_unlock(&smi_infos_lock);
2794 #ifdef CONFIG_PCI
2795                 pci_unregister_driver(&ipmi_pci_driver);
2796 #endif
2797                 driver_unregister(&ipmi_driver);
2798                 printk("ipmi_si: Unable to find any System Interface(s)\n");
2799                 return -ENODEV;
2800         } else {
2801                 mutex_unlock(&smi_infos_lock);
2802                 return 0;
2803         }
2804 }
2805 module_init(init_ipmi_si);
2806
2807 static void cleanup_one_si(struct smi_info *to_clean)
2808 {
2809         int           rv;
2810         unsigned long flags;
2811
2812         if (!to_clean)
2813                 return;
2814
2815         list_del(&to_clean->link);
2816
2817         /* Tell the timer and interrupt handlers that we are shutting
2818            down. */
2819         spin_lock_irqsave(&(to_clean->si_lock), flags);
2820         spin_lock(&(to_clean->msg_lock));
2821
2822         atomic_inc(&to_clean->stop_operation);
2823
2824         if (to_clean->irq_cleanup)
2825                 to_clean->irq_cleanup(to_clean);
2826
2827         spin_unlock(&(to_clean->msg_lock));
2828         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2829
2830         /* Wait until we know that any interrupt handlers that might
2831            have been running before we freed the interrupt have
2832            finished. */
2833         synchronize_sched();
2834
2835         wait_for_timer_and_thread(to_clean);
2836
2837         /* Interrupts and timeouts are stopped, now make sure the
2838            interface is in a clean state. */
2839         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2840                 poll(to_clean);
2841                 schedule_timeout_uninterruptible(1);
2842         }
2843
2844         rv = ipmi_unregister_smi(to_clean->intf);
2845         if (rv) {
2846                 printk(KERN_ERR
2847                        "ipmi_si: Unable to unregister device: errno=%d\n",
2848                        rv);
2849         }
2850
2851         to_clean->handlers->cleanup(to_clean->si_sm);
2852
2853         kfree(to_clean->si_sm);
2854
2855         if (to_clean->addr_source_cleanup)
2856                 to_clean->addr_source_cleanup(to_clean);
2857         if (to_clean->io_cleanup)
2858                 to_clean->io_cleanup(to_clean);
2859
2860         if (to_clean->dev_registered)
2861                 platform_device_unregister(to_clean->pdev);
2862
2863         kfree(to_clean);
2864 }
2865
2866 static __exit void cleanup_ipmi_si(void)
2867 {
2868         struct smi_info *e, *tmp_e;
2869
2870         if (!initialized)
2871                 return;
2872
2873 #ifdef CONFIG_PCI
2874         pci_unregister_driver(&ipmi_pci_driver);
2875 #endif
2876
2877         mutex_lock(&smi_infos_lock);
2878         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2879                 cleanup_one_si(e);
2880         mutex_unlock(&smi_infos_lock);
2881
2882         driver_unregister(&ipmi_driver);
2883 }
2884 module_exit(cleanup_ipmi_si);
2885
2886 MODULE_LICENSE("GPL");
2887 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2888 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");