ipmi: add powerpc openfirmware sensing
drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
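/*
 * For example, with HZ=1000 SI_USEC_PER_JIFFY is 1000, so
 * SI_TIMEOUT_JIFFIES is 10000/1000 = 10 jiffies (10 ms); with HZ=250 it
 * is 10000/4000 = 2 jiffies, so the timer actually fires about every
 * 8 ms.
 */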
84
85 enum si_intf_state {
86         SI_NORMAL,
87         SI_GETTING_FLAGS,
88         SI_GETTING_EVENTS,
89         SI_CLEARING_FLAGS,
90         SI_CLEARING_FLAGS_THEN_SET_IRQ,
91         SI_GETTING_MESSAGES,
92         SI_ENABLE_INTERRUPTS1,
93         SI_ENABLE_INTERRUPTS2
94         /* FIXME - add watchdog stuff. */
95 };
96
97 /* Some BT-specific defines we need here. */
98 #define IPMI_BT_INTMASK_REG             2
99 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
100 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
101
102 enum si_type {
103     SI_KCS, SI_SMIC, SI_BT
104 };
105 static char *si_to_str[] = { "kcs", "smic", "bt" };
106
107 #define DEVICE_NAME "ipmi_si"
108
109 static struct device_driver ipmi_driver =
110 {
111         .name = DEVICE_NAME,
112         .bus = &platform_bus_type
113 };
114
115 struct smi_info
116 {
117         int                    intf_num;
118         ipmi_smi_t             intf;
119         struct si_sm_data      *si_sm;
120         struct si_sm_handlers  *handlers;
121         enum si_type           si_type;
122         spinlock_t             si_lock;
123         spinlock_t             msg_lock;
124         struct list_head       xmit_msgs;
125         struct list_head       hp_xmit_msgs;
126         struct ipmi_smi_msg    *curr_msg;
127         enum si_intf_state     si_state;
128
129         /* Used to handle the various types of I/O that can occur with
130            IPMI */
131         struct si_sm_io io;
132         int (*io_setup)(struct smi_info *info);
133         void (*io_cleanup)(struct smi_info *info);
134         int (*irq_setup)(struct smi_info *info);
135         void (*irq_cleanup)(struct smi_info *info);
136         unsigned int io_size;
137         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
138         void (*addr_source_cleanup)(struct smi_info *info);
139         void *addr_source_data;
140
141         /* Per-OEM handler, called from handle_flags().
142            Returns 1 when handle_flags() needs to be re-run
143            or 0 indicating it set si_state itself.
144         */
145         int (*oem_data_avail_handler)(struct smi_info *smi_info);
146
147         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
148            is set to hold the flags until we are done handling everything
149            from the flags. */
150 #define RECEIVE_MSG_AVAIL       0x01
151 #define EVENT_MSG_BUFFER_FULL   0x02
152 #define WDT_PRE_TIMEOUT_INT     0x08
153 #define OEM0_DATA_AVAIL     0x20
154 #define OEM1_DATA_AVAIL     0x40
155 #define OEM2_DATA_AVAIL     0x80
156 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
157                              OEM1_DATA_AVAIL | \
158                              OEM2_DATA_AVAIL)
159         unsigned char       msg_flags;
160
161         /* If set to true, this will request events the next time the
162            state machine is idle. */
163         atomic_t            req_events;
164
165         /* If true, run the state machine to completion on every send
166            call.  Generally used after a panic to make sure stuff goes
167            out. */
168         int                 run_to_completion;
169
170         /* The I/O port of an SI interface. */
171         int                 port;
172
173         /* The space between start addresses of the two ports.  For
174            instance, if the first port is 0xca2 and the spacing is 4, then
175            the second port is 0xca6. */
176         unsigned int        spacing;
177
178         /* zero if no irq; */
179         int                 irq;
180
181         /* The timer for this si. */
182         struct timer_list   si_timer;
183
184         /* The time (in jiffies) the last timeout occurred at. */
185         unsigned long       last_timeout_jiffies;
186
187         /* Used to gracefully stop the timer without race conditions. */
188         atomic_t            stop_operation;
189
190         /* The driver will disable interrupts when it gets into a
191            situation where it cannot handle messages due to lack of
192            memory.  Once that situation clears up, it will re-enable
193            interrupts. */
194         int interrupt_disabled;
195
196         /* From the get device id response... */
197         struct ipmi_device_id device_id;
198
199         /* Driver model stuff. */
200         struct device *dev;
201         struct platform_device *pdev;
202
203          /* True if we allocated the device, false if it came from
204           * someplace else (like PCI). */
205         int dev_registered;
206
207         /* Slave address, could be reported from DMI. */
208         unsigned char slave_addr;
209
210         /* Counters and things for the proc filesystem. */
211         spinlock_t count_lock;
212         unsigned long short_timeouts;
213         unsigned long long_timeouts;
214         unsigned long timeout_restarts;
215         unsigned long idles;
216         unsigned long interrupts;
217         unsigned long attentions;
218         unsigned long flag_fetches;
219         unsigned long hosed_count;
220         unsigned long complete_transactions;
221         unsigned long events;
222         unsigned long watchdog_pretimeouts;
223         unsigned long incoming_messages;
224
225         struct task_struct *thread;
226
227         struct list_head link;
228 };
229
230 #define SI_MAX_PARMS 4
231
232 static int force_kipmid[SI_MAX_PARMS];
233 static int num_force_kipmid;
234
235 static int unload_when_empty = 1;
236
237 static int try_smi_init(struct smi_info *smi);
238 static void cleanup_one_si(struct smi_info *to_clean);
239
240 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
241 static int register_xaction_notifier(struct notifier_block * nb)
242 {
243         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
244 }
245
246 static void deliver_recv_msg(struct smi_info *smi_info,
247                              struct ipmi_smi_msg *msg)
248 {
249         /* Deliver the message to the upper layer with the lock
250            released. */
251         spin_unlock(&(smi_info->si_lock));
252         ipmi_smi_msg_received(smi_info->intf, msg);
253         spin_lock(&(smi_info->si_lock));
254 }
255
256 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
257 {
258         struct ipmi_smi_msg *msg = smi_info->curr_msg;
259
260         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
261                 cCode = IPMI_ERR_UNSPECIFIED;
262         /* else use it as is */
263
264         /* Make it a response */
265         msg->rsp[0] = msg->data[0] | 4;
266         msg->rsp[1] = msg->data[1];
267         msg->rsp[2] = cCode;
268         msg->rsp_size = 3;
269
270         smi_info->curr_msg = NULL;
271         deliver_recv_msg(smi_info, msg);
272 }
273
274 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
275 {
276         int              rv;
277         struct list_head *entry = NULL;
278 #ifdef DEBUG_TIMING
279         struct timeval t;
280 #endif
281
282         /* No need to save flags, we already have interrupts off and we
283            already hold the SMI lock. */
284         spin_lock(&(smi_info->msg_lock));
285
286         /* Pick the high priority queue first. */
287         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
288                 entry = smi_info->hp_xmit_msgs.next;
289         } else if (!list_empty(&(smi_info->xmit_msgs))) {
290                 entry = smi_info->xmit_msgs.next;
291         }
292
293         if (!entry) {
294                 smi_info->curr_msg = NULL;
295                 rv = SI_SM_IDLE;
296         } else {
297                 int err;
298
299                 list_del(entry);
300                 smi_info->curr_msg = list_entry(entry,
301                                                 struct ipmi_smi_msg,
302                                                 link);
303 #ifdef DEBUG_TIMING
304                 do_gettimeofday(&t);
305                 printk("**Start2: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
306 #endif
307                 err = atomic_notifier_call_chain(&xaction_notifier_list,
308                                 0, smi_info);
309                 if (err & NOTIFY_STOP_MASK) {
310                         rv = SI_SM_CALL_WITHOUT_DELAY;
311                         goto out;
312                 }
313                 err = smi_info->handlers->start_transaction(
314                         smi_info->si_sm,
315                         smi_info->curr_msg->data,
316                         smi_info->curr_msg->data_size);
317                 if (err) {
318                         return_hosed_msg(smi_info, err);
319                 }
320
321                 rv = SI_SM_CALL_WITHOUT_DELAY;
322         }
323         out:
324         spin_unlock(&(smi_info->msg_lock));
325
326         return rv;
327 }
328
329 static void start_enable_irq(struct smi_info *smi_info)
330 {
331         unsigned char msg[2];
332
333         /* If we are enabling interrupts, we have to tell the
334            BMC to use them. */
335         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
336         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
337
338         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
339         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
340 }
341
342 static void start_clear_flags(struct smi_info *smi_info)
343 {
344         unsigned char msg[3];
345
346         /* Make sure the watchdog pre-timeout flag is not set at startup. */
347         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
348         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
349         msg[2] = WDT_PRE_TIMEOUT_INT;
350
351         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
352         smi_info->si_state = SI_CLEARING_FLAGS;
353 }
354
355 /* When we have a situation where we run out of memory and cannot
356    allocate messages, we just leave them in the BMC and run the system
357    polled until we can allocate some memory.  Once we have some
358    memory, we will re-enable the interrupt. */
359 static inline void disable_si_irq(struct smi_info *smi_info)
360 {
361         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
362                 disable_irq_nosync(smi_info->irq);
363                 smi_info->interrupt_disabled = 1;
364         }
365 }
366
367 static inline void enable_si_irq(struct smi_info *smi_info)
368 {
369         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
370                 enable_irq(smi_info->irq);
371                 smi_info->interrupt_disabled = 0;
372         }
373 }
374
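/*
 * Work through the flags returned by a Get Message Flags command, in
 * priority order: the watchdog pre-timeout is handled first, then any
 * queued messages, then the event buffer, and finally any per-OEM
 * "data available" flags via oem_data_avail_handler().
 */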
375 static void handle_flags(struct smi_info *smi_info)
376 {
377  retry:
378         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
379                 /* Watchdog pre-timeout */
380                 spin_lock(&smi_info->count_lock);
381                 smi_info->watchdog_pretimeouts++;
382                 spin_unlock(&smi_info->count_lock);
383
384                 start_clear_flags(smi_info);
385                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
386                 spin_unlock(&(smi_info->si_lock));
387                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
388                 spin_lock(&(smi_info->si_lock));
389         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
390                 /* Messages available. */
391                 smi_info->curr_msg = ipmi_alloc_smi_msg();
392                 if (!smi_info->curr_msg) {
393                         disable_si_irq(smi_info);
394                         smi_info->si_state = SI_NORMAL;
395                         return;
396                 }
397                 enable_si_irq(smi_info);
398
399                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
400                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
401                 smi_info->curr_msg->data_size = 2;
402
403                 smi_info->handlers->start_transaction(
404                         smi_info->si_sm,
405                         smi_info->curr_msg->data,
406                         smi_info->curr_msg->data_size);
407                 smi_info->si_state = SI_GETTING_MESSAGES;
408         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
409                 /* Events available. */
410                 smi_info->curr_msg = ipmi_alloc_smi_msg();
411                 if (!smi_info->curr_msg) {
412                         disable_si_irq(smi_info);
413                         smi_info->si_state = SI_NORMAL;
414                         return;
415                 }
416                 enable_si_irq(smi_info);
417
418                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
420                 smi_info->curr_msg->data_size = 2;
421
422                 smi_info->handlers->start_transaction(
423                         smi_info->si_sm,
424                         smi_info->curr_msg->data,
425                         smi_info->curr_msg->data_size);
426                 smi_info->si_state = SI_GETTING_EVENTS;
427         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
428                    smi_info->oem_data_avail_handler) {
429                 if (smi_info->oem_data_avail_handler(smi_info))
430                         goto retry;
431         } else {
432                 smi_info->si_state = SI_NORMAL;
433         }
434 }
435
436 static void handle_transaction_done(struct smi_info *smi_info)
437 {
438         struct ipmi_smi_msg *msg;
439 #ifdef DEBUG_TIMING
440         struct timeval t;
441
442         do_gettimeofday(&t);
443         printk("**Done: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
444 #endif
445         switch (smi_info->si_state) {
446         case SI_NORMAL:
447                 if (!smi_info->curr_msg)
448                         break;
449
450                 smi_info->curr_msg->rsp_size
451                         = smi_info->handlers->get_result(
452                                 smi_info->si_sm,
453                                 smi_info->curr_msg->rsp,
454                                 IPMI_MAX_MSG_LENGTH);
455
456                 /* Do this here because deliver_recv_msg() releases the
457                    lock, and a new message can be put in during the
458                    time the lock is released. */
459                 msg = smi_info->curr_msg;
460                 smi_info->curr_msg = NULL;
461                 deliver_recv_msg(smi_info, msg);
462                 break;
463
464         case SI_GETTING_FLAGS:
465         {
466                 unsigned char msg[4];
467                 unsigned int  len;
468
469                 /* We got the flags from the SMI, now handle them. */
470                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
471                 if (msg[2] != 0) {
472                         /* Error fetching flags, just give up for
473                            now. */
474                         smi_info->si_state = SI_NORMAL;
475                 } else if (len < 4) {
476                         /* Hmm, no flags.  That's technically illegal, but
477                            don't use uninitialized data. */
478                         smi_info->si_state = SI_NORMAL;
479                 } else {
480                         smi_info->msg_flags = msg[3];
481                         handle_flags(smi_info);
482                 }
483                 break;
484         }
485
486         case SI_CLEARING_FLAGS:
487         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
488         {
489                 unsigned char msg[3];
490
491                 /* We cleared the flags. */
492                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
493                 if (msg[2] != 0) {
494                         /* Error clearing flags */
495                         printk(KERN_WARNING
496                                "ipmi_si: Error clearing flags: %2.2x\n",
497                                msg[2]);
498                 }
499                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
500                         start_enable_irq(smi_info);
501                 else
502                         smi_info->si_state = SI_NORMAL;
503                 break;
504         }
505
506         case SI_GETTING_EVENTS:
507         {
508                 smi_info->curr_msg->rsp_size
509                         = smi_info->handlers->get_result(
510                                 smi_info->si_sm,
511                                 smi_info->curr_msg->rsp,
512                                 IPMI_MAX_MSG_LENGTH);
513
514                 /* Do this here because deliver_recv_msg() releases the
515                    lock, and a new message can be put in during the
516                    time the lock is released. */
517                 msg = smi_info->curr_msg;
518                 smi_info->curr_msg = NULL;
519                 if (msg->rsp[2] != 0) {
520                         /* Error getting event, probably done. */
521                         msg->done(msg);
522
523                         /* Take off the event flag. */
524                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
525                         handle_flags(smi_info);
526                 } else {
527                         spin_lock(&smi_info->count_lock);
528                         smi_info->events++;
529                         spin_unlock(&smi_info->count_lock);
530
531                         /* Do this before we deliver the message
532                            because delivering the message releases the
533                            lock and something else can mess with the
534                            state. */
535                         handle_flags(smi_info);
536
537                         deliver_recv_msg(smi_info, msg);
538                 }
539                 break;
540         }
541
542         case SI_GETTING_MESSAGES:
543         {
544                 smi_info->curr_msg->rsp_size
545                         = smi_info->handlers->get_result(
546                                 smi_info->si_sm,
547                                 smi_info->curr_msg->rsp,
548                                 IPMI_MAX_MSG_LENGTH);
549
550                 /* Do this here because deliver_recv_msg() releases the
551                    lock, and a new message can be put in during the
552                    time the lock is released. */
553                 msg = smi_info->curr_msg;
554                 smi_info->curr_msg = NULL;
555                 if (msg->rsp[2] != 0) {
556                         /* Error getting message, probably done. */
557                         msg->done(msg);
558
559                         /* Take off the msg flag. */
560                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
561                         handle_flags(smi_info);
562                 } else {
563                         spin_lock(&smi_info->count_lock);
564                         smi_info->incoming_messages++;
565                         spin_unlock(&smi_info->count_lock);
566
567                         /* Do this before we deliver the message
568                            because delivering the message releases the
569                            lock and something else can mess with the
570                            state. */
571                         handle_flags(smi_info);
572
573                         deliver_recv_msg(smi_info, msg);
574                 }
575                 break;
576         }
577
578         case SI_ENABLE_INTERRUPTS1:
579         {
580                 unsigned char msg[4];
581
582                 /* We got the current global enables, now set the irq enable bit. */
583                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
584                 if (msg[2] != 0) {
585                         printk(KERN_WARNING
586                                "ipmi_si: Could not enable interrupts"
587                                ", failed get, using polled mode.\n");
588                         smi_info->si_state = SI_NORMAL;
589                 } else {
590                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
591                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
592                         msg[2] = msg[3] | 1; /* enable msg queue int */
593                         smi_info->handlers->start_transaction(
594                                 smi_info->si_sm, msg, 3);
595                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
596                 }
597                 break;
598         }
599
600         case SI_ENABLE_INTERRUPTS2:
601         {
602                 unsigned char msg[4];
603
604                 /* The response to setting the global enables; check for errors. */
605                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
606                 if (msg[2] != 0) {
607                         printk(KERN_WARNING
608                                "ipmi_si: Could not enable interrupts"
609                                ", failed set, using polled mode.\n");
610                 }
611                 smi_info->si_state = SI_NORMAL;
612                 break;
613         }
614         }
615 }
616
617 /* Called on timeouts and events.  Timeouts should pass the elapsed
618    time, interrupts should pass in zero. */
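/* In this file, smi_timeout() passes the elapsed time in microseconds
   and si_irq_handler() passes zero. */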
619 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
620                                            int time)
621 {
622         enum si_sm_result si_sm_result;
623
624  restart:
625         /* There used to be a loop here that waited a little while
626            (around 25us) before giving up.  That turned out to be
627            pointless, the minimum delays I was seeing were in the 300us
628            range, which is far too long to wait in an interrupt.  So
629            we just run until the state machine tells us something
630            happened or it needs a delay. */
631         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
632         time = 0;
633         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
634         {
635                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
636         }
637
638         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
639         {
640                 spin_lock(&smi_info->count_lock);
641                 smi_info->complete_transactions++;
642                 spin_unlock(&smi_info->count_lock);
643
644                 handle_transaction_done(smi_info);
645                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
646         }
647         else if (si_sm_result == SI_SM_HOSED)
648         {
649                 spin_lock(&smi_info->count_lock);
650                 smi_info->hosed_count++;
651                 spin_unlock(&smi_info->count_lock);
652
653                 /* Do this before return_hosed_msg(), because that
654                    releases the lock. */
655                 smi_info->si_state = SI_NORMAL;
656                 if (smi_info->curr_msg != NULL) {
657                         /* If we were handling a user message, format
658                            a response to send to the upper layer to
659                            tell it about the error. */
660                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
661                 }
662                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
663         }
664
665         /* We prefer handling attn over new messages. */
666         if (si_sm_result == SI_SM_ATTN)
667         {
668                 unsigned char msg[2];
669
670                 spin_lock(&smi_info->count_lock);
671                 smi_info->attentions++;
672                 spin_unlock(&smi_info->count_lock);
673
674                 /* Got an attn, send down a get message flags to see
675                    what's causing it.  It would be better to handle
676                    this in the upper layer, but due to the way
677                    interrupts work with the SMI, that's not really
678                    possible. */
679                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
680                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
681
682                 smi_info->handlers->start_transaction(
683                         smi_info->si_sm, msg, 2);
684                 smi_info->si_state = SI_GETTING_FLAGS;
685                 goto restart;
686         }
687
688         /* If we are currently idle, try to start the next message. */
689         if (si_sm_result == SI_SM_IDLE) {
690                 spin_lock(&smi_info->count_lock);
691                 smi_info->idles++;
692                 spin_unlock(&smi_info->count_lock);
693
694                 si_sm_result = start_next_msg(smi_info);
695                 if (si_sm_result != SI_SM_IDLE)
696                         goto restart;
697         }
698
699         if ((si_sm_result == SI_SM_IDLE)
700             && (atomic_read(&smi_info->req_events)))
701         {
702                 /* We are idle and the upper layer requested that I fetch
703                    events, so do so. */
704                 atomic_set(&smi_info->req_events, 0);
705
706                 smi_info->curr_msg = ipmi_alloc_smi_msg();
707                 if (!smi_info->curr_msg)
708                         goto out;
709
710                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
711                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
712                 smi_info->curr_msg->data_size = 2;
713
714                 smi_info->handlers->start_transaction(
715                         smi_info->si_sm,
716                         smi_info->curr_msg->data,
717                         smi_info->curr_msg->data_size);
718                 smi_info->si_state = SI_GETTING_EVENTS;
719                 goto restart;
720         }
721  out:
722         return si_sm_result;
723 }
724
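/* The .sender handler given to the upper layer: queue the message (on
   the high-priority list if priority > 0) and start the transaction if
   the interface is idle.  In run_to_completion mode the message is
   instead driven synchronously until the state machine goes idle. */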
725 static void sender(void                *send_info,
726                    struct ipmi_smi_msg *msg,
727                    int                 priority)
728 {
729         struct smi_info   *smi_info = send_info;
730         enum si_sm_result result;
731         unsigned long     flags;
732 #ifdef DEBUG_TIMING
733         struct timeval    t;
734 #endif
735
736         if (atomic_read(&smi_info->stop_operation)) {
737                 msg->rsp[0] = msg->data[0] | 4;
738                 msg->rsp[1] = msg->data[1];
739                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
740                 msg->rsp_size = 3;
741                 deliver_recv_msg(smi_info, msg);
742                 return;
743         }
744
745         spin_lock_irqsave(&(smi_info->msg_lock), flags);
746 #ifdef DEBUG_TIMING
747         do_gettimeofday(&t);
748         printk("**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
749 #endif
750
751         if (smi_info->run_to_completion) {
752                 /* If we are running to completion, then throw it in
753                    the list and run transactions until everything is
754                    clear.  Priority doesn't matter here. */
755                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
756
757                 /* We have to release the msg lock and claim the smi
758                    lock in this case, because of race conditions. */
759                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
760
761                 spin_lock_irqsave(&(smi_info->si_lock), flags);
762                 result = smi_event_handler(smi_info, 0);
763                 while (result != SI_SM_IDLE) {
764                         udelay(SI_SHORT_TIMEOUT_USEC);
765                         result = smi_event_handler(smi_info,
766                                                    SI_SHORT_TIMEOUT_USEC);
767                 }
768                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
769                 return;
770         } else {
771                 if (priority > 0) {
772                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
773                 } else {
774                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
775                 }
776         }
777         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
778
779         spin_lock_irqsave(&(smi_info->si_lock), flags);
780         if ((smi_info->si_state == SI_NORMAL)
781             && (smi_info->curr_msg == NULL))
782         {
783                 start_next_msg(smi_info);
784         }
785         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
786 }
787
788 static void set_run_to_completion(void *send_info, int i_run_to_completion)
789 {
790         struct smi_info   *smi_info = send_info;
791         enum si_sm_result result;
792         unsigned long     flags;
793
794         spin_lock_irqsave(&(smi_info->si_lock), flags);
795
796         smi_info->run_to_completion = i_run_to_completion;
797         if (i_run_to_completion) {
798                 result = smi_event_handler(smi_info, 0);
799                 while (result != SI_SM_IDLE) {
800                         udelay(SI_SHORT_TIMEOUT_USEC);
801                         result = smi_event_handler(smi_info,
802                                                    SI_SHORT_TIMEOUT_USEC);
803                 }
804         }
805
806         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
807 }
808
809 static int ipmi_thread(void *data)
810 {
811         struct smi_info *smi_info = data;
812         unsigned long flags;
813         enum si_sm_result smi_result;
814
815         set_user_nice(current, 19);
816         while (!kthread_should_stop()) {
817                 spin_lock_irqsave(&(smi_info->si_lock), flags);
818                 smi_result = smi_event_handler(smi_info, 0);
819                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
820                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
821                         /* do nothing */
822                 }
823                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
824                         schedule();
825                 else
826                         schedule_timeout_interruptible(1);
827         }
828         return 0;
829 }
830
831
832 static void poll(void *send_info)
833 {
834         struct smi_info *smi_info = send_info;
835
836         /*
837          * Make sure there is some delay in the poll loop so we can
838          * drive time forward and timeout things.
839          */
840         udelay(10);
841         smi_event_handler(smi_info, 10);
842 }
843
844 static void request_events(void *send_info)
845 {
846         struct smi_info *smi_info = send_info;
847
848         if (atomic_read(&smi_info->stop_operation))
849                 return;
850
851         atomic_set(&smi_info->req_events, 1);
852 }
853
854 static int initialized;
855
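/* Timer callback that drives the interface.  It runs the state machine
   with the elapsed time and rearms the timer: one jiffy if the state
   machine asked for a short delay (and no interrupt is in use),
   otherwise the normal SI_TIMEOUT_JIFFIES period. */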
856 static void smi_timeout(unsigned long data)
857 {
858         struct smi_info   *smi_info = (struct smi_info *) data;
859         enum si_sm_result smi_result;
860         unsigned long     flags;
861         unsigned long     jiffies_now;
862         long              time_diff;
863 #ifdef DEBUG_TIMING
864         struct timeval    t;
865 #endif
866
867         if (atomic_read(&smi_info->stop_operation))
868                 return;
869
870         spin_lock_irqsave(&(smi_info->si_lock), flags);
871 #ifdef DEBUG_TIMING
872         do_gettimeofday(&t);
873         printk("**Timer: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
874 #endif
875         jiffies_now = jiffies;
876         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
877                      * SI_USEC_PER_JIFFY);
878         smi_result = smi_event_handler(smi_info, time_diff);
879
880         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
881
882         smi_info->last_timeout_jiffies = jiffies_now;
883
884         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
885                 /* Running with interrupts, only do long timeouts. */
886                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
887                 spin_lock_irqsave(&smi_info->count_lock, flags);
888                 smi_info->long_timeouts++;
889                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
890                 goto do_add_timer;
891         }
892
893         /* If the state machine asks for a short delay, then shorten
894            the timer timeout. */
895         if (smi_result == SI_SM_CALL_WITH_DELAY) {
896                 spin_lock_irqsave(&smi_info->count_lock, flags);
897                 smi_info->short_timeouts++;
898                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
899                 smi_info->si_timer.expires = jiffies + 1;
900         } else {
901                 spin_lock_irqsave(&smi_info->count_lock, flags);
902                 smi_info->long_timeouts++;
903                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
904                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
905         }
906
907  do_add_timer:
908         add_timer(&(smi_info->si_timer));
909 }
910
911 static irqreturn_t si_irq_handler(int irq, void *data)
912 {
913         struct smi_info *smi_info = data;
914         unsigned long   flags;
915 #ifdef DEBUG_TIMING
916         struct timeval  t;
917 #endif
918
919         spin_lock_irqsave(&(smi_info->si_lock), flags);
920
921         spin_lock(&smi_info->count_lock);
922         smi_info->interrupts++;
923         spin_unlock(&smi_info->count_lock);
924
925         if (atomic_read(&smi_info->stop_operation))
926                 goto out;
927
928 #ifdef DEBUG_TIMING
929         do_gettimeofday(&t);
930         printk("**Interrupt: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
931 #endif
932         smi_event_handler(smi_info, 0);
933  out:
934         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
935         return IRQ_HANDLED;
936 }
937
938 static irqreturn_t si_bt_irq_handler(int irq, void *data)
939 {
940         struct smi_info *smi_info = data;
941         /* We need to clear the IRQ flag for the BT interface. */
942         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
943                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
944                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
945         return si_irq_handler(irq, data);
946 }
947
948 static int smi_start_processing(void       *send_info,
949                                 ipmi_smi_t intf)
950 {
951         struct smi_info *new_smi = send_info;
952         int             enable = 0;
953
954         new_smi->intf = intf;
955
956         /* Set up the timer that drives the interface. */
957         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
958         new_smi->last_timeout_jiffies = jiffies;
959         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
960
961         /*
962          * Check if the user forcefully enabled the daemon.
963          */
964         if (new_smi->intf_num < num_force_kipmid)
965                 enable = force_kipmid[new_smi->intf_num];
966         /*
967          * The BT interface is efficient enough to not need a thread,
968          * and there is no need for a thread if we have interrupts.
969          */
970         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
971                 enable = 1;
972
973         if (enable) {
974                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
975                                               "kipmi%d", new_smi->intf_num);
976                 if (IS_ERR(new_smi->thread)) {
977                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
978                                " kernel thread due to error %ld, only using"
979                                " timers to drive the interface\n",
980                                PTR_ERR(new_smi->thread));
981                         new_smi->thread = NULL;
982                 }
983         }
984
985         return 0;
986 }
987
988 static void set_maintenance_mode(void *send_info, int enable)
989 {
990         struct smi_info   *smi_info = send_info;
991
992         if (!enable)
993                 atomic_set(&smi_info->req_events, 0);
994 }
995
996 static struct ipmi_smi_handlers handlers =
997 {
998         .owner                  = THIS_MODULE,
999         .start_processing       = smi_start_processing,
1000         .sender                 = sender,
1001         .request_events         = request_events,
1002         .set_maintenance_mode   = set_maintenance_mode,
1003         .set_run_to_completion  = set_run_to_completion,
1004         .poll                   = poll,
1005 };
1006
1007 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1008    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1009
1010 static LIST_HEAD(smi_infos);
1011 static DEFINE_MUTEX(smi_infos_lock);
1012 static int smi_num; /* Used to sequence the SMIs */
1013
1014 #define DEFAULT_REGSPACING      1
1015 #define DEFAULT_REGSIZE         1
1016
1017 static int           si_trydefaults = 1;
1018 static char          *si_type[SI_MAX_PARMS];
1019 #define MAX_SI_TYPE_STR 30
1020 static char          si_type_str[MAX_SI_TYPE_STR];
1021 static unsigned long addrs[SI_MAX_PARMS];
1022 static int num_addrs;
1023 static unsigned int  ports[SI_MAX_PARMS];
1024 static int num_ports;
1025 static int           irqs[SI_MAX_PARMS];
1026 static int num_irqs;
1027 static int           regspacings[SI_MAX_PARMS];
1028 static int num_regspacings;
1029 static int           regsizes[SI_MAX_PARMS];
1030 static int num_regsizes;
1031 static int           regshifts[SI_MAX_PARMS];
1032 static int num_regshifts;
1033 static int slave_addrs[SI_MAX_PARMS];
1034 static int num_slave_addrs;
1035
1036 #define IPMI_IO_ADDR_SPACE  0
1037 #define IPMI_MEM_ADDR_SPACE 1
1038 static char *addr_space_to_str[] = { "i/o", "mem" };
1039
1040 static int hotmod_handler(const char *val, struct kernel_param *kp);
1041
1042 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1043 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1044                  " Documentation/IPMI.txt in the kernel sources for the"
1045                  " gory details.");
1046
1047 module_param_named(trydefaults, si_trydefaults, bool, 0);
1048 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1049                  " default scan of the KCS and SMIC interface at the standard"
1050                  " address");
1051 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1052 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1053                  " interface separated by commas.  The types are 'kcs',"
1054                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1055                  " the first interface to kcs and the second to bt");
1056 module_param_array(addrs, long, &num_addrs, 0);
1057 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1058                  " addresses separated by commas.  Only use if an interface"
1059                  " is in memory.  Otherwise, set it to zero or leave"
1060                  " it blank.");
1061 module_param_array(ports, int, &num_ports, 0);
1062 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1063                  " addresses separated by commas.  Only use if an interface"
1064                  " is a port.  Otherwise, set it to zero or leave"
1065                  " it blank.");
1066 module_param_array(irqs, int, &num_irqs, 0);
1067 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1068                  " addresses separated by commas.  Only use if an interface"
1069                  " has an interrupt.  Otherwise, set it to zero or leave"
1070                  " it blank.");
1071 module_param_array(regspacings, int, &num_regspacings, 0);
1072 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1073                  " and each successive register used by the interface.  For"
1074                  " instance, if the start address is 0xca2 and the spacing"
1075                  " is 2, then the second address is at 0xca4.  Defaults"
1076                  " to 1.");
1077 module_param_array(regsizes, int, &num_regsizes, 0);
1078 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1079                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1080                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1081                  " the 8-bit IPMI register has to be read from a larger"
1082                  " register.");
1083 module_param_array(regshifts, int, &num_regshifts, 0);
1084 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1085                  " IPMI register, in bits.  For instance, if the data"
1086                  " is read from a 32-bit word and the IPMI data is in"
1087                  " bits 8-15, then the shift would be 8");
1088 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1089 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1090                  " the controller.  Normally this is 0x20, but can be"
1091                  " overridden by this parm.  This is an array indexed"
1092                  " by interface number.");
1093 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1094 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1095                  " disabled(0).  Normally the IPMI driver auto-detects"
1096                  " this, but the value may be overridden by this parm.");
1097 module_param(unload_when_empty, int, 0);
1098 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1099                  " specified or found, default is 1.  Setting to 0"
1100                  " is useful for hot add of devices using hotmod.");
1101
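/*
 * As a rough illustration of the parameters above (the values here are
 * only examples, the real ones are system-specific), a manually
 * configured KCS interface might be set up with:
 *
 *   modprobe ipmi_si type=kcs ports=0xca2 regspacings=1 irqs=0
 */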
1102
1103 static void std_irq_cleanup(struct smi_info *info)
1104 {
1105         if (info->si_type == SI_BT)
1106                 /* Disable the interrupt in the BT interface. */
1107                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1108         free_irq(info->irq, info);
1109 }
1110
1111 static int std_irq_setup(struct smi_info *info)
1112 {
1113         int rv;
1114
1115         if (!info->irq)
1116                 return 0;
1117
1118         if (info->si_type == SI_BT) {
1119                 rv = request_irq(info->irq,
1120                                  si_bt_irq_handler,
1121                                  IRQF_DISABLED,
1122                                  DEVICE_NAME,
1123                                  info);
1124                 if (!rv)
1125                         /* Enable the interrupt in the BT interface. */
1126                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1127                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1128         } else
1129                 rv = request_irq(info->irq,
1130                                  si_irq_handler,
1131                                  IRQF_DISABLED,
1132                                  DEVICE_NAME,
1133                                  info);
1134         if (rv) {
1135                 printk(KERN_WARNING
1136                        "ipmi_si: %s unable to claim interrupt %d,"
1137                        " running polled\n",
1138                        DEVICE_NAME, info->irq);
1139                 info->irq = 0;
1140         } else {
1141                 info->irq_cleanup = std_irq_cleanup;
1142                 printk("  Using irq %d\n", info->irq);
1143         }
1144
1145         return rv;
1146 }
1147
1148 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1149 {
1150         unsigned int addr = io->addr_data;
1151
1152         return inb(addr + (offset * io->regspacing));
1153 }
1154
1155 static void port_outb(struct si_sm_io *io, unsigned int offset,
1156                       unsigned char b)
1157 {
1158         unsigned int addr = io->addr_data;
1159
1160         outb(b, addr + (offset * io->regspacing));
1161 }
1162
1163 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1164 {
1165         unsigned int addr = io->addr_data;
1166
1167         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1168 }
1169
1170 static void port_outw(struct si_sm_io *io, unsigned int offset,
1171                       unsigned char b)
1172 {
1173         unsigned int addr = io->addr_data;
1174
1175         outw(b << io->regshift, addr + (offset * io->regspacing));
1176 }
1177
1178 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1179 {
1180         unsigned int addr = io->addr_data;
1181
1182         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1183 }
1184
1185 static void port_outl(struct si_sm_io *io, unsigned int offset,
1186                       unsigned char b)
1187 {
1188         unsigned int addr = io->addr_data;
1189
1190         outl(b << io->regshift, addr+(offset * io->regspacing));
1191 }
1192
1193 static void port_cleanup(struct smi_info *info)
1194 {
1195         unsigned int addr = info->io.addr_data;
1196         int          idx;
1197
1198         if (addr) {
1199                 for (idx = 0; idx < info->io_size; idx++) {
1200                         release_region(addr + idx * info->io.regspacing,
1201                                        info->io.regsize);
1202                 }
1203         }
1204 }
1205
1206 static int port_setup(struct smi_info *info)
1207 {
1208         unsigned int addr = info->io.addr_data;
1209         int          idx;
1210
1211         if (!addr)
1212                 return -ENODEV;
1213
1214         info->io_cleanup = port_cleanup;
1215
1216         /* Figure out the actual inb/inw/inl/etc routine to use based
1217            upon the register size. */
1218         switch (info->io.regsize) {
1219         case 1:
1220                 info->io.inputb = port_inb;
1221                 info->io.outputb = port_outb;
1222                 break;
1223         case 2:
1224                 info->io.inputb = port_inw;
1225                 info->io.outputb = port_outw;
1226                 break;
1227         case 4:
1228                 info->io.inputb = port_inl;
1229                 info->io.outputb = port_outl;
1230                 break;
1231         default:
1232                 printk("ipmi_si: Invalid register size: %d\n",
1233                        info->io.regsize);
1234                 return -EINVAL;
1235         }
1236
1237         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1238          * tables.  This causes problems when trying to register the
1239          * entire I/O region.  Therefore we must register each I/O
1240          * port separately.
1241          */
1242         for (idx = 0; idx < info->io_size; idx++) {
1243                 if (request_region(addr + idx * info->io.regspacing,
1244                                    info->io.regsize, DEVICE_NAME) == NULL) {
1245                         /* Undo allocations */
1246                         while (idx--) {
1247                                 release_region(addr + idx * info->io.regspacing,
1248                                                info->io.regsize);
1249                         }
1250                         return -EIO;
1251                 }
1252         }
1253         return 0;
1254 }
1255
1256 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1257 {
1258         return readb((io->addr)+(offset * io->regspacing));
1259 }
1260
1261 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1262                      unsigned char b)
1263 {
1264         writeb(b, (io->addr)+(offset * io->regspacing));
1265 }
1266
1267 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1268 {
1269         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1270                 & 0xff;
1271 }
1272
1273 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1274                      unsigned char b)
1275 {
1276         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1277 }
1278
1279 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1280 {
1281         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1282                 & 0xff;
1283 }
1284
1285 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1286                      unsigned char b)
1287 {
1288         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1289 }
1290
1291 #ifdef readq
1292 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1293 {
1294         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1295                 & 0xff;
1296 }
1297
1298 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1299                      unsigned char b)
1300 {
1301         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1302 }
1303 #endif
1304
1305 static void mem_cleanup(struct smi_info *info)
1306 {
1307         unsigned long addr = info->io.addr_data;
1308         int           mapsize;
1309
1310         if (info->io.addr) {
1311                 iounmap(info->io.addr);
1312
1313                 mapsize = ((info->io_size * info->io.regspacing)
1314                            - (info->io.regspacing - info->io.regsize));
1315
1316                 release_mem_region(addr, mapsize);
1317         }
1318 }
1319
1320 static int mem_setup(struct smi_info *info)
1321 {
1322         unsigned long addr = info->io.addr_data;
1323         int           mapsize;
1324
1325         if (!addr)
1326                 return -ENODEV;
1327
1328         info->io_cleanup = mem_cleanup;
1329
1330         /* Figure out the actual readb/readw/readl/etc routine to use based
1331            upon the register size. */
1332         switch (info->io.regsize) {
1333         case 1:
1334                 info->io.inputb = intf_mem_inb;
1335                 info->io.outputb = intf_mem_outb;
1336                 break;
1337         case 2:
1338                 info->io.inputb = intf_mem_inw;
1339                 info->io.outputb = intf_mem_outw;
1340                 break;
1341         case 4:
1342                 info->io.inputb = intf_mem_inl;
1343                 info->io.outputb = intf_mem_outl;
1344                 break;
1345 #ifdef readq
1346         case 8:
1347                 info->io.inputb = mem_inq;
1348                 info->io.outputb = mem_outq;
1349                 break;
1350 #endif
1351         default:
1352                 printk("ipmi_si: Invalid register size: %d\n",
1353                        info->io.regsize);
1354                 return -EINVAL;
1355         }
1356
1357         /* Calculate the total amount of memory to claim.  This is an
1358          * unusual looking calculation, but it avoids claiming any
1359          * more memory than it has to.  It will claim everything
1360          * between the first address to the end of the last full
1361          * register. */
1362         mapsize = ((info->io_size * info->io.regspacing)
1363                    - (info->io.regspacing - info->io.regsize));
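        /* For example, with io_size = 2, regspacing = 4 and regsize = 1
         * this claims 2*4 - (4-1) = 5 bytes (offsets 0-4): the first
         * register at offset 0 and the second at offset 4. */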
1364
1365         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1366                 return -EIO;
1367
1368         info->io.addr = ioremap(addr, mapsize);
1369         if (info->io.addr == NULL) {
1370                 release_mem_region(addr, mapsize);
1371                 return -EIO;
1372         }
1373         return 0;
1374 }
1375
1376 /*
1377  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1378  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1379  * Options are:
1380  *   rsp=<regspacing>
1381  *   rsi=<regsize>
1382  *   rsh=<regshift>
1383  *   irq=<irq>
1384  *   ipmb=<ipmb addr>
1385  */
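/*
 * For example (illustrative only; Documentation/IPMI.txt has the
 * authoritative syntax), a KCS interface at I/O port 0xca2 could be
 * added at run time with:
 *
 *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */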
1386 enum hotmod_op { HM_ADD, HM_REMOVE };
1387 struct hotmod_vals {
1388         char *name;
1389         int  val;
1390 };
1391 static struct hotmod_vals hotmod_ops[] = {
1392         { "add",        HM_ADD },
1393         { "remove",     HM_REMOVE },
1394         { NULL }
1395 };
1396 static struct hotmod_vals hotmod_si[] = {
1397         { "kcs",        SI_KCS },
1398         { "smic",       SI_SMIC },
1399         { "bt",         SI_BT },
1400         { NULL }
1401 };
1402 static struct hotmod_vals hotmod_as[] = {
1403         { "mem",        IPMI_MEM_ADDR_SPACE },
1404         { "i/o",        IPMI_IO_ADDR_SPACE },
1405         { NULL }
1406 };
1407
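/*
 * Match the next comma-terminated token in *curr against the name/value
 * table v; on success store the value in *val, advance *curr past the
 * comma and return 0, otherwise return -EINVAL.
 */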
1408 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1409 {
1410         char *s;
1411         int  i;
1412
1413         s = strchr(*curr, ',');
1414         if (!s) {
1415                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1416                 return -EINVAL;
1417         }
1418         *s = '\0';
1419         s++;
1420         for (i = 0; v[i].name; i++) {
1421                 if (strcmp(*curr, v[i].name) == 0) {
1422                         *val = v[i].val;
1423                         *curr = s;
1424                         return 0;
1425                 }
1426         }
1427
1428         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1429         return -EINVAL;
1430 }
1431
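/*
 * If curr names the given integer option, parse its (required) argument
 * into *val.  Returns 1 if the option was consumed, 0 if it did not
 * match, or -EINVAL on a missing or malformed argument.
 */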
1432 static int check_hotmod_int_op(const char *curr, const char *option,
1433                                const char *name, int *val)
1434 {
1435         char *n;
1436
1437         if (strcmp(curr, name) == 0) {
1438                 if (!option) {
1439                         printk(KERN_WARNING PFX
1440                                "No option given for '%s'\n",
1441                                curr);
1442                         return -EINVAL;
1443                 }
1444                 *val = simple_strtoul(option, &n, 0);
1445                 if ((*n != '\0') || (*option == '\0')) {
1446                         printk(KERN_WARNING PFX
1447                                "Bad option given for '%s'\n",
1448                                curr);
1449                         return -EINVAL;
1450                 }
1451                 return 1;
1452         }
1453         return 0;
1454 }
1455
1456 static int hotmod_handler(const char *val, struct kernel_param *kp)
1457 {
1458         char *str = kstrdup(val, GFP_KERNEL);
1459         int  rv;
1460         char *next, *curr, *s, *n, *o;
1461         enum hotmod_op op;
1462         enum si_type si_type;
1463         int  addr_space;
1464         unsigned long addr;
1465         int regspacing;
1466         int regsize;
1467         int regshift;
1468         int irq;
1469         int ipmb;
1470         int ival;
1471         int len;
1472         struct smi_info *info;
1473
1474         if (!str)
1475                 return -ENOMEM;
1476
1477         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1478         len = strlen(str);
1479         ival = len - 1;
1480         while ((ival >= 0) && isspace(str[ival])) {
1481                 str[ival] = '\0';
1482                 ival--;
1483         }
1484
1485         for (curr = str; curr; curr = next) {
1486                 regspacing = 1;
1487                 regsize = 1;
1488                 regshift = 0;
1489                 irq = 0;
1490                 ipmb = 0x20;
1491
1492                 next = strchr(curr, ':');
1493                 if (next) {
1494                         *next = '\0';
1495                         next++;
1496                 }
1497
1498                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1499                 if (rv)
1500                         break;
1501                 op = ival;
1502
1503                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1504                 if (rv)
1505                         break;
1506                 si_type = ival;
1507
1508                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1509                 if (rv)
1510                         break;
1511
1512                 s = strchr(curr, ',');
1513                 if (s) {
1514                         *s = '\0';
1515                         s++;
1516                 }
1517                 addr = simple_strtoul(curr, &n, 0);
1518                 if ((*n != '\0') || (*curr == '\0')) {
1519                         printk(KERN_WARNING PFX "Invalid hotmod address"
1520                                " '%s'\n", curr);
1521                         break;
1522                 }
1523
1524                 while (s) {
1525                         curr = s;
1526                         s = strchr(curr, ',');
1527                         if (s) {
1528                                 *s = '\0';
1529                                 s++;
1530                         }
1531                         o = strchr(curr, '=');
1532                         if (o) {
1533                                 *o = '\0';
1534                                 o++;
1535                         }
1536                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1537                         if (rv < 0)
1538                                 goto out;
1539                         else if (rv)
1540                                 continue;
1541                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1542                         if (rv < 0)
1543                                 goto out;
1544                         else if (rv)
1545                                 continue;
1546                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1547                         if (rv < 0)
1548                                 goto out;
1549                         else if (rv)
1550                                 continue;
1551                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1552                         if (rv < 0)
1553                                 goto out;
1554                         else if (rv)
1555                                 continue;
1556                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1557                         if (rv < 0)
1558                                 goto out;
1559                         else if (rv)
1560                                 continue;
1561
1562                         rv = -EINVAL;
1563                         printk(KERN_WARNING PFX
1564                                "Invalid hotmod option '%s'\n",
1565                                curr);
1566                         goto out;
1567                 }
1568
1569                 if (op == HM_ADD) {
1570                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1571                         if (!info) {
1572                                 rv = -ENOMEM;
1573                                 goto out;
1574                         }
1575
1576                         info->addr_source = "hotmod";
1577                         info->si_type = si_type;
1578                         info->io.addr_data = addr;
1579                         info->io.addr_type = addr_space;
1580                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1581                                 info->io_setup = mem_setup;
1582                         else
1583                                 info->io_setup = port_setup;
1584
1585                         info->io.addr = NULL;
1586                         info->io.regspacing = regspacing;
1587                         if (!info->io.regspacing)
1588                                 info->io.regspacing = DEFAULT_REGSPACING;
1589                         info->io.regsize = regsize;
1590                         if (!info->io.regsize)
1591                                 info->io.regsize = DEFAULT_REGSPACING;
1592                         info->io.regshift = regshift;
1593                         info->irq = irq;
1594                         if (info->irq)
1595                                 info->irq_setup = std_irq_setup;
1596                         info->slave_addr = ipmb;
1597
1598                         try_smi_init(info);
1599                 } else {
1600                         /* remove */
1601                         struct smi_info *e, *tmp_e;
1602
1603                         mutex_lock(&smi_infos_lock);
1604                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1605                                 if (e->io.addr_type != addr_space)
1606                                         continue;
1607                                 if (e->si_type != si_type)
1608                                         continue;
1609                                 if (e->io.addr_data == addr)
1610                                         cleanup_one_si(e);
1611                         }
1612                         mutex_unlock(&smi_infos_lock);
1613                 }
1614         }
1615         rv = len;
1616  out:
1617         kfree(str);
1618         return rv;
1619 }
1620
1621 static __devinit void hardcode_find_bmc(void)
1622 {
1623         int             i;
1624         struct smi_info *info;
1625
1626         for (i = 0; i < SI_MAX_PARMS; i++) {
1627                 if (!ports[i] && !addrs[i])
1628                         continue;
1629
1630                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1631                 if (!info)
1632                         return;
1633
1634                 info->addr_source = "hardcoded";
1635
1636                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1637                         info->si_type = SI_KCS;
1638                 } else if (strcmp(si_type[i], "smic") == 0) {
1639                         info->si_type = SI_SMIC;
1640                 } else if (strcmp(si_type[i], "bt") == 0) {
1641                         info->si_type = SI_BT;
1642                 } else {
1643                         printk(KERN_WARNING
1644                                "ipmi_si: Interface type specified "
1645                                "for interface %d, was invalid: %s\n",
1646                                i, si_type[i]);
1647                         kfree(info);
1648                         continue;
1649                 }
1650
1651                 if (ports[i]) {
1652                         /* An I/O port */
1653                         info->io_setup = port_setup;
1654                         info->io.addr_data = ports[i];
1655                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1656                 } else if (addrs[i]) {
1657                         /* A memory port */
1658                         info->io_setup = mem_setup;
1659                         info->io.addr_data = addrs[i];
1660                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1661                 } else {
1662                         printk(KERN_WARNING
1663                                "ipmi_si: Interface type specified "
1664                                "for interface %d, "
1665                                "but port and address were not set or "
1666                                "set to zero.\n", i);
1667                         kfree(info);
1668                         continue;
1669                 }
1670
1671                 info->io.addr = NULL;
1672                 info->io.regspacing = regspacings[i];
1673                 if (!info->io.regspacing)
1674                         info->io.regspacing = DEFAULT_REGSPACING;
1675                 info->io.regsize = regsizes[i];
1676                 if (!info->io.regsize)
1677                         info->io.regsize = DEFAULT_REGSPACING;
1678                 info->io.regshift = regshifts[i];
1679                 info->irq = irqs[i];
1680                 if (info->irq)
1681                         info->irq_setup = std_irq_setup;
1682
1683                 try_smi_init(info);
1684         }
1685 }
1686
1687 #ifdef CONFIG_ACPI
1688
1689 #include <linux/acpi.h>
1690
1691 /* Once we get an ACPI failure, we don't try any more, because we go
1692    through the tables sequentially.  Once we don't find a table, there
1693    are no more. */
1694 static int acpi_failure;
1695
1696 /* For GPE-type interrupts. */
1697 static u32 ipmi_acpi_gpe(void *context)
1698 {
1699         struct smi_info *smi_info = context;
1700         unsigned long   flags;
1701 #ifdef DEBUG_TIMING
1702         struct timeval t;
1703 #endif
1704
1705         spin_lock_irqsave(&(smi_info->si_lock), flags);
1706
1707         spin_lock(&smi_info->count_lock);
1708         smi_info->interrupts++;
1709         spin_unlock(&smi_info->count_lock);
1710
1711         if (atomic_read(&smi_info->stop_operation))
1712                 goto out;
1713
1714 #ifdef DEBUG_TIMING
1715         do_gettimeofday(&t);
1716         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1717 #endif
1718         smi_event_handler(smi_info, 0);
1719  out:
1720         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1721
1722         return ACPI_INTERRUPT_HANDLED;
1723 }
1724
1725 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1726 {
1727         if (!info->irq)
1728                 return;
1729
1730         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1731 }
1732
1733 static int acpi_gpe_irq_setup(struct smi_info *info)
1734 {
1735         acpi_status status;
1736
1737         if (!info->irq)
1738                 return 0;
1739
1740         /* FIXME - is level triggered right? */
1741         status = acpi_install_gpe_handler(NULL,
1742                                           info->irq,
1743                                           ACPI_GPE_LEVEL_TRIGGERED,
1744                                           &ipmi_acpi_gpe,
1745                                           info);
1746         if (status != AE_OK) {
1747                 printk(KERN_WARNING
1748                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1749                        " running polled\n",
1750                        DEVICE_NAME, info->irq);
1751                 info->irq = 0;
1752                 return -EINVAL;
1753         } else {
1754                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1755                 printk("  Using ACPI GPE %d\n", info->irq);
1756                 return 0;
1757         }
1758 }
1759
1760 /*
1761  * Defined at
1762  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1763  */
1764 struct SPMITable {
1765         s8      Signature[4];
1766         u32     Length;
1767         u8      Revision;
1768         u8      Checksum;
1769         s8      OEMID[6];
1770         s8      OEMTableID[8];
1771         s8      OEMRevision[4];
1772         s8      CreatorID[4];
1773         s8      CreatorRevision[4];
1774         u8      InterfaceType;
1775         u8      IPMIlegacy;
1776         s16     SpecificationRevision;
1777
1778         /*
1779          * Bit 0 - SCI interrupt supported
1780          * Bit 1 - I/O APIC/SAPIC
1781          */
1782         u8      InterruptType;
1783
1784         /* If bit 0 of InterruptType is set, then this is the SCI
1785            interrupt in the GPEx_STS register. */
1786         u8      GPE;
1787
1788         s16     Reserved;
1789
1790         /* If bit 1 of InterruptType is set, then this is the I/O
1791            APIC/SAPIC interrupt. */
1792         u32     GlobalSystemInterrupt;
1793
1794         /* The actual register address. */
1795         struct acpi_generic_address addr;
1796
1797         u8      UID[4];
1798
1799         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1800 };
1801
1802 static __devinit int try_init_acpi(struct SPMITable *spmi)
1803 {
1804         struct smi_info  *info;
1805         u8               addr_space;
1806
1807         if (spmi->IPMIlegacy != 1) {
1808                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1809                 return -ENODEV;
1810         }
1811
1812         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1813                 addr_space = IPMI_MEM_ADDR_SPACE;
1814         else
1815                 addr_space = IPMI_IO_ADDR_SPACE;
1816
1817         info = kzalloc(sizeof(*info), GFP_KERNEL);
1818         if (!info) {
1819                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1820                 return -ENOMEM;
1821         }
1822
1823         info->addr_source = "ACPI";
1824
1825         /* Figure out the interface type. */
1826         switch (spmi->InterfaceType) {
1828         case 1: /* KCS */
1829                 info->si_type = SI_KCS;
1830                 break;
1831         case 2: /* SMIC */
1832                 info->si_type = SI_SMIC;
1833                 break;
1834         case 3: /* BT */
1835                 info->si_type = SI_BT;
1836                 break;
1837         default:
1838                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1839                         spmi->InterfaceType);
1840                 kfree(info);
1841                 return -EIO;
1842         }
1843
1844         if (spmi->InterruptType & 1) {
1845                 /* We've got a GPE interrupt. */
1846                 info->irq = spmi->GPE;
1847                 info->irq_setup = acpi_gpe_irq_setup;
1848         } else if (spmi->InterruptType & 2) {
1849                 /* We've got an APIC/SAPIC interrupt. */
1850                 info->irq = spmi->GlobalSystemInterrupt;
1851                 info->irq_setup = std_irq_setup;
1852         } else {
1853                 /* Use the default interrupt setting. */
1854                 info->irq = 0;
1855                 info->irq_setup = NULL;
1856         }
1857
1858         if (spmi->addr.bit_width) {
1859                 /* A (hopefully) properly formed register bit width. */
1860                 info->io.regspacing = spmi->addr.bit_width / 8;
1861         } else {
1862                 info->io.regspacing = DEFAULT_REGSPACING;
1863         }
1864         info->io.regsize = info->io.regspacing;
1865         info->io.regshift = spmi->addr.bit_offset;
1866
1867         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1868                 info->io_setup = mem_setup;
1869                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1870         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1871                 info->io_setup = port_setup;
1872                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1873         } else {
1874                 kfree(info);
1875                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1876                 return -EIO;
1877         }
1878         info->io.addr_data = spmi->addr.address;
1879
1880         try_smi_init(info);
1881
1882         return 0;
1883 }
1884
1885 static __devinit void acpi_find_bmc(void)
1886 {
1887         acpi_status      status;
1888         struct SPMITable *spmi;
1889         int              i;
1890
1891         if (acpi_disabled)
1892                 return;
1893
1894         if (acpi_failure)
1895                 return;
1896
1897         for (i = 0; ; i++) {
1898                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1899                                         (struct acpi_table_header **)&spmi);
1900                 if (status != AE_OK)
1901                         return;
1902
1903                 try_init_acpi(spmi);
1904         }
1905 }
1906 #endif
1907
1908 #ifdef CONFIG_DMI
1909 struct dmi_ipmi_data
1910 {
1911         u8              type;
1912         u8              addr_space;
1913         unsigned long   base_addr;
1914         u8              irq;
1915         u8              offset;
1916         u8              slave_addr;
1917 };
1918
1919 static int __devinit decode_dmi(struct dmi_header *dm,
1920                                 struct dmi_ipmi_data *dmi)
1921 {
1922         u8              *data = (u8 *)dm;
1923         unsigned long   base_addr;
1924         u8              reg_spacing;
1925         u8              len = dm->length;
1926
1927         dmi->type = data[4];
1928
1929         memcpy(&base_addr, data+8, sizeof(unsigned long));
1930         if (len >= 0x11) {
1931                 if (base_addr & 1) {
1932                         /* I/O */
1933                         base_addr &= 0xFFFE;
1934                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1935                 }
1936                 else {
1937                         /* Memory */
1938                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1939                 }
1940                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1941                    is odd. */
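                /* For example (illustrative), a raw base of 0xca3 decodes
                   as I/O space at 0xca2; if bit 4 of byte 0x10 is also set,
                   the true odd address 0xca3 is restored below. */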
1942                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1943
1944                 dmi->irq = data[0x11];
1945
1946                 /* The top two bits of byte 0x10 hold the register spacing. */
1947                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1948                 switch(reg_spacing){
1949                 case 0x00: /* Byte boundaries */
1950                     dmi->offset = 1;
1951                     break;
1952                 case 0x01: /* 32-bit boundaries */
1953                     dmi->offset = 4;
1954                     break;
1955                 case 0x02: /* 16-byte boundaries */
1956                     dmi->offset = 16;
1957                     break;
1958                 default:
1959                     /* Some other interface, just ignore it. */
1960                     return -EIO;
1961                 }
1962         } else {
1963                 /* Old DMI spec. */
1964                 /* Note that technically, the lower bit of the base
1965                  * address should be 1 if the address is I/O and 0 if
1966                  * the address is in memory.  So many systems get that
1967                  * wrong (and all that I have seen are I/O) so we just
1968                  * ignore that bit and assume I/O.  Systems that use
1969                  * memory should use the newer spec, anyway. */
1970                 dmi->base_addr = base_addr & 0xfffe;
1971                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1972                 dmi->offset = 1;
1973         }
1974
1975         dmi->slave_addr = data[6];
1976
1977         return 0;
1978 }
1979
1980 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1981 {
1982         struct smi_info *info;
1983
1984         info = kzalloc(sizeof(*info), GFP_KERNEL);
1985         if (!info) {
1986                 printk(KERN_ERR
1987                        "ipmi_si: Could not allocate SI data\n");
1988                 return;
1989         }
1990
1991         info->addr_source = "SMBIOS";
1992
1993         switch (ipmi_data->type) {
1994         case 0x01: /* KCS */
1995                 info->si_type = SI_KCS;
1996                 break;
1997         case 0x02: /* SMIC */
1998                 info->si_type = SI_SMIC;
1999                 break;
2000         case 0x03: /* BT */
2001                 info->si_type = SI_BT;
2002                 break;
2003         default:
2004                 return;
2005         }
2006
2007         switch (ipmi_data->addr_space) {
2008         case IPMI_MEM_ADDR_SPACE:
2009                 info->io_setup = mem_setup;
2010                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2011                 break;
2012
2013         case IPMI_IO_ADDR_SPACE:
2014                 info->io_setup = port_setup;
2015                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2016                 break;
2017
2018         default:
2019                 kfree(info);
2020                 printk(KERN_WARNING
2021                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2022                        ipmi_data->addr_space);
2023                 return;
2024         }
2025         info->io.addr_data = ipmi_data->base_addr;
2026
2027         info->io.regspacing = ipmi_data->offset;
2028         if (!info->io.regspacing)
2029                 info->io.regspacing = DEFAULT_REGSPACING;
2030         info->io.regsize = DEFAULT_REGSPACING;
2031         info->io.regshift = 0;
2032
2033         info->slave_addr = ipmi_data->slave_addr;
2034
2035         info->irq = ipmi_data->irq;
2036         if (info->irq)
2037                 info->irq_setup = std_irq_setup;
2038
2039         try_smi_init(info);
2040 }
2041
2042 static void __devinit dmi_find_bmc(void)
2043 {
2044         struct dmi_device    *dev = NULL;
2045         struct dmi_ipmi_data data;
2046         int                  rv;
2047
2048         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2049                 memset(&data, 0, sizeof(data));
2050                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2051                 if (!rv)
2052                         try_init_dmi(&data);
2053         }
2054 }
2055 #endif /* CONFIG_DMI */
2056
2057 #ifdef CONFIG_PCI
2058
2059 #define PCI_ERMC_CLASSCODE              0x0C0700
2060 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2061 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2062 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2063 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2064 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2065
2066 #define PCI_HP_VENDOR_ID    0x103C
2067 #define PCI_MMC_DEVICE_ID   0x121A
2068 #define PCI_MMC_ADDR_CW     0x10
2069
2070 static void ipmi_pci_cleanup(struct smi_info *info)
2071 {
2072         struct pci_dev *pdev = info->addr_source_data;
2073
2074         pci_disable_device(pdev);
2075 }
2076
2077 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2078                                     const struct pci_device_id *ent)
2079 {
2080         int rv;
2081         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2082         struct smi_info *info;
2083         int first_reg_offset = 0;
2084
2085         info = kzalloc(sizeof(*info), GFP_KERNEL);
2086         if (!info)
2087                 return -ENOMEM;
2088
2089         info->addr_source = "PCI";
2090
2091         switch (class_type) {
2092         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2093                 info->si_type = SI_SMIC;
2094                 break;
2095
2096         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2097                 info->si_type = SI_KCS;
2098                 break;
2099
2100         case PCI_ERMC_CLASSCODE_TYPE_BT:
2101                 info->si_type = SI_BT;
2102                 break;
2103
2104         default:
2105                 kfree(info);
2106                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2107                        pci_name(pdev), class_type);
2108                 return -ENOMEM;
2109         }
2110
2111         rv = pci_enable_device(pdev);
2112         if (rv) {
2113                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2114                        pci_name(pdev));
2115                 kfree(info);
2116                 return rv;
2117         }
2118
2119         info->addr_source_cleanup = ipmi_pci_cleanup;
2120         info->addr_source_data = pdev;
2121
2122         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2123                 first_reg_offset = 1;
2124
2125         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2126                 info->io_setup = port_setup;
2127                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2128         } else {
2129                 info->io_setup = mem_setup;
2130                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2131         }
2132         info->io.addr_data = pci_resource_start(pdev, 0);
2133
2134         info->io.regspacing = DEFAULT_REGSPACING;
2135         info->io.regsize = DEFAULT_REGSPACING;
2136         info->io.regshift = 0;
2137
2138         info->irq = pdev->irq;
2139         if (info->irq)
2140                 info->irq_setup = std_irq_setup;
2141
2142         info->dev = &pdev->dev;
2143
2144         return try_smi_init(info);
2145 }
2146
2147 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2148 {
2149 }
2150
2151 #ifdef CONFIG_PM
2152 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2153 {
2154         return 0;
2155 }
2156
2157 static int ipmi_pci_resume(struct pci_dev *pdev)
2158 {
2159         return 0;
2160 }
2161 #endif
2162
2163 static struct pci_device_id ipmi_pci_devices[] = {
2164         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2165         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
        { 0, }  /* terminating entry */
2166 };
2167 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2168
2169 static struct pci_driver ipmi_pci_driver = {
2170         .name =         DEVICE_NAME,
2171         .id_table =     ipmi_pci_devices,
2172         .probe =        ipmi_pci_probe,
2173         .remove =       __devexit_p(ipmi_pci_remove),
2174 #ifdef CONFIG_PM
2175         .suspend =      ipmi_pci_suspend,
2176         .resume =       ipmi_pci_resume,
2177 #endif
2178 };
2179 #endif /* CONFIG_PCI */
2180
2181
2182 #ifdef CONFIG_PPC_OF
2183 static int __devinit ipmi_of_probe(struct of_device *dev,
2184                          const struct of_device_id *match)
2185 {
2186         struct smi_info *info;
2187         struct resource resource;
2188         const int *regsize, *regspacing, *regshift;
2189         struct device_node *np = dev->node;
2190         int ret;
2191         int proplen;
2192
2193         dev_info(&dev->dev, PFX "probing via device tree\n");
2194
2195         ret = of_address_to_resource(np, 0, &resource);
2196         if (ret) {
2197                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2198                 return ret;
2199         }
2200
2201         regsize = get_property(np, "reg-size", &proplen);
2202         if (regsize && proplen != 4) {
2203                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2204                 return -EINVAL;
2205         }
2206
2207         regspacing = get_property(np, "reg-spacing", &proplen);
2208         if (regspacing && proplen != 4) {
2209                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2210                 return -EINVAL;
2211         }
2212
2213         regshift = get_property(np, "reg-shift", &proplen);
2214         if (regshift && proplen != 4) {
2215                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2216                 return -EINVAL;
2217         }
2218
2219         info = kzalloc(sizeof(*info), GFP_KERNEL);
2220
2221         if (!info) {
2222                 dev_err(&dev->dev,
2223                         PFX "could not allocate memory for OF probe\n");
2224                 return -ENOMEM;
2225         }
2226
2227         info->si_type           = (enum si_type) match->data;
2228         info->addr_source       = "device-tree";
2229         info->io_setup          = mem_setup;
2230         info->irq_setup         = std_irq_setup;
2231
2232         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2233         info->io.addr_data      = resource.start;
2234
2235         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2236         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2237         info->io.regshift       = regshift ? *regshift : 0;
2238
2239         info->irq               = irq_of_parse_and_map(dev->node, 0);
2240         info->dev               = &dev->dev;
2241
2242         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2243                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2244                 info->irq);
2245
2246         dev->dev.driver_data = (void*) info;
2247
2248         return try_smi_init(info);
2249 }
2250
2251 static int __devexit ipmi_of_remove(struct of_device *dev)
2252 {
2253         cleanup_one_si(dev->dev.driver_data);
2254         return 0;
2255 }
2256
2257 static struct of_device_id ipmi_match[] =
2258 {
2259         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2260         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2261         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2262         {},
2263 };
2264
2265 static struct of_platform_driver ipmi_of_platform_driver =
2266 {
2267         .name           = "ipmi",
2268         .match_table    = ipmi_match,
2269         .probe          = ipmi_of_probe,
2270         .remove         = __devexit_p(ipmi_of_remove),
2271 };
2272 #endif /* CONFIG_PPC_OF */
2273
2274
2275 static int try_get_dev_id(struct smi_info *smi_info)
2276 {
2277         unsigned char         msg[2];
2278         unsigned char         *resp;
2279         unsigned long         resp_len;
2280         enum si_sm_result     smi_result;
2281         int                   rv = 0;
2282
2283         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2284         if (!resp)
2285                 return -ENOMEM;
2286
2287         /* Do a Get Device ID command, since it comes back with some
2288            useful info. */
2289         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2290         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2291         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2292
2293         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2294         for (;;)
2295         {
2296                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2297                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2298                         schedule_timeout_uninterruptible(1);
2299                         smi_result = smi_info->handlers->event(
2300                                 smi_info->si_sm, 100);
2301                 }
2302                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2303                 {
2304                         smi_result = smi_info->handlers->event(
2305                                 smi_info->si_sm, 0);
2306                 }
2307                 else
2308                         break;
2309         }
2310         if (smi_result == SI_SM_HOSED) {
2311                 /* We couldn't get the state machine to run, so whatever's at
2312                    the port is probably not an IPMI SMI interface. */
2313                 rv = -ENODEV;
2314                 goto out;
2315         }
2316
2317         /* Otherwise, we got some data. */
2318         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2319                                                   resp, IPMI_MAX_MSG_LENGTH);
2320         if (resp_len < 14) {
2321                 /* That's odd, it should be longer. */
2322                 rv = -EINVAL;
2323                 goto out;
2324         }
2325
2326         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2327                 /* That's odd, it shouldn't be able to fail. */
2328                 rv = -EINVAL;
2329                 goto out;
2330         }
2331
2332         /* Record info from the get device id, in case we need it. */
2333         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2334
2335  out:
2336         kfree(resp);
2337         return rv;
2338 }
2339
2340 static int type_file_read_proc(char *page, char **start, off_t off,
2341                                int count, int *eof, void *data)
2342 {
2343         struct smi_info *smi = data;
2344
2345         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2346 }
2347
2348 static int stat_file_read_proc(char *page, char **start, off_t off,
2349                                int count, int *eof, void *data)
2350 {
2351         char            *out = (char *) page;
2352         struct smi_info *smi = data;
2353
2354         out += sprintf(out, "interrupts_enabled:    %d\n",
2355                        smi->irq && !smi->interrupt_disabled);
2356         out += sprintf(out, "short_timeouts:        %ld\n",
2357                        smi->short_timeouts);
2358         out += sprintf(out, "long_timeouts:         %ld\n",
2359                        smi->long_timeouts);
2360         out += sprintf(out, "timeout_restarts:      %ld\n",
2361                        smi->timeout_restarts);
2362         out += sprintf(out, "idles:                 %ld\n",
2363                        smi->idles);
2364         out += sprintf(out, "interrupts:            %ld\n",
2365                        smi->interrupts);
2366         out += sprintf(out, "attentions:            %ld\n",
2367                        smi->attentions);
2368         out += sprintf(out, "flag_fetches:          %ld\n",
2369                        smi->flag_fetches);
2370         out += sprintf(out, "hosed_count:           %ld\n",
2371                        smi->hosed_count);
2372         out += sprintf(out, "complete_transactions: %ld\n",
2373                        smi->complete_transactions);
2374         out += sprintf(out, "events:                %ld\n",
2375                        smi->events);
2376         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2377                        smi->watchdog_pretimeouts);
2378         out += sprintf(out, "incoming_messages:     %ld\n",
2379                        smi->incoming_messages);
2380
2381         return out - page;
2382 }
2383
2384 static int param_read_proc(char *page, char **start, off_t off,
2385                            int count, int *eof, void *data)
2386 {
2387         struct smi_info *smi = data;
2388
2389         return sprintf(page,
2390                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2391                        si_to_str[smi->si_type],
2392                        addr_space_to_str[smi->io.addr_type],
2393                        smi->io.addr_data,
2394                        smi->io.regspacing,
2395                        smi->io.regsize,
2396                        smi->io.regshift,
2397                        smi->irq,
2398                        smi->slave_addr);
2399 }
2400
2401 /*
2402  * oem_data_avail_to_receive_msg_avail
2403  * @info - smi_info structure with msg_flags set
2404  *
2405  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2406  * Returns 1 indicating need to re-run handle_flags().
2407  */
2408 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2409 {
2410         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2411                                 RECEIVE_MSG_AVAIL);
2412         return 1;
2413 }
2414
2415 /*
2416  * setup_dell_poweredge_oem_data_handler
2417  * @info - smi_info.device_id must be populated
2418  *
2419  * Systems that match, but have firmware version < 1.40 may assert
2420  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2421  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2422  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2423  * as RECEIVE_MSG_AVAIL instead.
2424  *
2425  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2426  * asserts the OEM[012] bits, and since the driver would have to
2427  * change to handle that properly anyway, we don't actually check
2428  * the firmware version.
2429  * Device ID = 0x20                BMC on PowerEdge 8G servers
2430  * Device Revision = 0x80
2431  * Firmware Revision1 = 0x01       BMC version 1.40
2432  * Firmware Revision2 = 0x40       BCD encoded
2433  * IPMI Version = 0x51             IPMI 1.5
2434  * Manufacturer ID = A2 02 00      Dell IANA
2435  *
2436  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2437  * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2438  *
2439  */
2440 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2441 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2442 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2443 #define DELL_IANA_MFR_ID 0x0002a2
2444 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2445 {
2446         struct ipmi_device_id *id = &smi_info->device_id;
2447         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2448                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2449                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2450                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2451                         smi_info->oem_data_avail_handler =
2452                                 oem_data_avail_to_receive_msg_avail;
2453                 }
2454                 else if (ipmi_version_major(id) < 1 ||
2455                          (ipmi_version_major(id) == 1 &&
2456                           ipmi_version_minor(id) < 5)) {
2457                         smi_info->oem_data_avail_handler =
2458                                 oem_data_avail_to_receive_msg_avail;
2459                 }
2460         }
2461 }
2462
2463 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2464 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2465 {
2466         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2467
2468         /* Make it a response */
2469         msg->rsp[0] = msg->data[0] | 4;
2470         msg->rsp[1] = msg->data[1];
2471         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2472         msg->rsp_size = 3;
2473         smi_info->curr_msg = NULL;
2474         deliver_recv_msg(smi_info, msg);
2475 }
2476
2477 /*
2478  * dell_poweredge_bt_xaction_handler
2479  * @info - smi_info.device_id must be populated
2480  *
2481  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2482  * not respond to a Get SDR command if the length of the data
2483  * requested is exactly 0x3A, which leads to command timeouts and no
2484  * data returned.  This intercepts such commands, and causes userspace
2485  * callers to try again with a different-sized buffer, which succeeds.
2486  */
2487
2488 #define STORAGE_NETFN 0x0A
2489 #define STORAGE_CMD_GET_SDR 0x23
2490 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2491                                              unsigned long unused,
2492                                              void *in)
2493 {
2494         struct smi_info *smi_info = in;
2495         unsigned char *data = smi_info->curr_msg->data;
2496         unsigned int size   = smi_info->curr_msg->data_size;
2497         if (size >= 8 &&
2498             (data[0]>>2) == STORAGE_NETFN &&
2499             data[1] == STORAGE_CMD_GET_SDR &&
2500             data[7] == 0x3A) {
2501                 return_hosed_msg_badsize(smi_info);
2502                 return NOTIFY_STOP;
2503         }
2504         return NOTIFY_DONE;
2505 }
2506
2507 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2508         .notifier_call  = dell_poweredge_bt_xaction_handler,
2509 };
2510
2511 /*
2512  * setup_dell_poweredge_bt_xaction_handler
2513  * @info - smi_info.device_id must be filled in already
2514  *
2515  * Registers the Dell PowerEdge BT transaction work-around notifier
2516  * above when we know the interface needs it.
2517  */
2518 static void
2519 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2520 {
2521         struct ipmi_device_id *id = &smi_info->device_id;
2522         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2523             smi_info->si_type == SI_BT)
2524                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2525 }
2526
2527 /*
2528  * setup_oem_data_handler
2529  * @info - smi_info.device_id must be filled in already
2530  *
2531  * Fills in smi_info.oem_data_avail_handler
2532  * when we know what function to use there.
2533  */
2534
2535 static void setup_oem_data_handler(struct smi_info *smi_info)
2536 {
2537         setup_dell_poweredge_oem_data_handler(smi_info);
2538 }
2539
2540 static void setup_xaction_handlers(struct smi_info *smi_info)
2541 {
2542         setup_dell_poweredge_bt_xaction_handler(smi_info);
2543 }
2544
2545 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2546 {
2547         if (smi_info->intf) {
2548                 /* The timer and thread are only running if the
2549                    interface has been started up and registered. */
2550                 if (smi_info->thread != NULL)
2551                         kthread_stop(smi_info->thread);
2552                 del_timer_sync(&smi_info->si_timer);
2553         }
2554 }
2555
2556 static __devinitdata struct ipmi_default_vals
2557 {
2558         int type;
2559         int port;
2560 } ipmi_defaults[] =
2561 {
2562         { .type = SI_KCS, .port = 0xca2 },
2563         { .type = SI_SMIC, .port = 0xca9 },
2564         { .type = SI_BT, .port = 0xe4 },
2565         { .port = 0 }
2566 };
2567
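/*
 * Called only when no interface was found by any other method: walk
 * the ipmi_defaults table above and see whether a state machine
 * responds at any of the legacy addresses.
 */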
2568 static __devinit void default_find_bmc(void)
2569 {
2570         struct smi_info *info;
2571         int             i;
2572
2573         for (i = 0; ; i++) {
2574                 if (!ipmi_defaults[i].port)
2575                         break;
2576
2577 #ifdef CONFIG_PPC_MERGE
2578                 if (check_legacy_ioport(ipmi_defaults[i].port))
2579                         continue;
2580 #endif
2581
2582                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2583                 if (!info)
2584                         return;
2585
2586                 info->addr_source = NULL;
2587
2588                 info->si_type = ipmi_defaults[i].type;
2589                 info->io_setup = port_setup;
2590                 info->io.addr_data = ipmi_defaults[i].port;
2591                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2592
2593                 info->io.addr = NULL;
2594                 info->io.regspacing = DEFAULT_REGSPACING;
2595                 info->io.regsize = DEFAULT_REGSPACING;
2596                 info->io.regshift = 0;
2597
2598                 if (try_smi_init(info) == 0) {
2599                         /* Found one... */
2600                         printk(KERN_INFO "ipmi_si: Found default %s state"
2601                                " machine at %s address 0x%lx\n",
2602                                si_to_str[info->si_type],
2603                                addr_space_to_str[info->io.addr_type],
2604                                info->io.addr_data);
2605                         return;
2606                 }
2607         }
2608 }
2609
2610 static int is_new_interface(struct smi_info *info)
2611 {
2612         struct smi_info *e;
2613
2614         list_for_each_entry(e, &smi_infos, link) {
2615                 if (e->io.addr_type != info->io.addr_type)
2616                         continue;
2617                 if (e->io.addr_data == info->io.addr_data)
2618                         return 0;
2619         }
2620
2621         return 1;
2622 }
2623
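/*
 * Try to bring up a single interface: reject duplicates, pick the
 * state machine for the interface type, set up the I/O, probe the BMC
 * with Get Device ID, claim any IRQ, then register with the IPMI core
 * and create the proc entries.  On any failure, everything set up so
 * far is torn down again.
 */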
2624 static int try_smi_init(struct smi_info *new_smi)
2625 {
2626         int rv;
2627
2628         if (new_smi->addr_source) {
2629                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2630                        " machine at %s address 0x%lx, slave address 0x%x,"
2631                        " irq %d\n",
2632                        new_smi->addr_source,
2633                        si_to_str[new_smi->si_type],
2634                        addr_space_to_str[new_smi->io.addr_type],
2635                        new_smi->io.addr_data,
2636                        new_smi->slave_addr, new_smi->irq);
2637         }
2638
2639         mutex_lock(&smi_infos_lock);
2640         if (!is_new_interface(new_smi)) {
2641                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2642                 rv = -EBUSY;
2643                 goto out_err;
2644         }
2645
2646         /* So we know not to free it unless we have allocated one. */
2647         new_smi->intf = NULL;
2648         new_smi->si_sm = NULL;
2649         new_smi->handlers = NULL;
2650
2651         switch (new_smi->si_type) {
2652         case SI_KCS:
2653                 new_smi->handlers = &kcs_smi_handlers;
2654                 break;
2655
2656         case SI_SMIC:
2657                 new_smi->handlers = &smic_smi_handlers;
2658                 break;
2659
2660         case SI_BT:
2661                 new_smi->handlers = &bt_smi_handlers;
2662                 break;
2663
2664         default:
2665                 /* No support for anything else yet. */
2666                 rv = -EIO;
2667                 goto out_err;
2668         }
2669
2670         /* Allocate the state machine's data and initialize it. */
2671         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2672         if (!new_smi->si_sm) {
2673                 printk(" Could not allocate state machine memory\n");
2674                 rv = -ENOMEM;
2675                 goto out_err;
2676         }
2677         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2678                                                         &new_smi->io);
2679
2680         /* Now that we know the I/O size, we can set up the I/O. */
2681         rv = new_smi->io_setup(new_smi);
2682         if (rv) {
2683                 printk(" Could not set up I/O space\n");
2684                 goto out_err;
2685         }
2686
2687         spin_lock_init(&(new_smi->si_lock));
2688         spin_lock_init(&(new_smi->msg_lock));
2689         spin_lock_init(&(new_smi->count_lock));
2690
2691         /* Do low-level detection first. */
2692         if (new_smi->handlers->detect(new_smi->si_sm)) {
2693                 if (new_smi->addr_source)
2694                         printk(KERN_INFO "ipmi_si: Interface detection"
2695                                " failed\n");
2696                 rv = -ENODEV;
2697                 goto out_err;
2698         }
2699
2700         /* Attempt a get device id command.  If it fails, we probably
2701            don't have a BMC here. */
2702         rv = try_get_dev_id(new_smi);
2703         if (rv) {
2704                 if (new_smi->addr_source)
2705                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2706                                " at this location\n");
2707                 goto out_err;
2708         }
2709
2710         setup_oem_data_handler(new_smi);
2711         setup_xaction_handlers(new_smi);
2712
2713         /* Try to claim any interrupts. */
2714         if (new_smi->irq_setup)
2715                 new_smi->irq_setup(new_smi);
2716
2717         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2718         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2719         new_smi->curr_msg = NULL;
2720         atomic_set(&new_smi->req_events, 0);
2721         new_smi->run_to_completion = 0;
2722
2723         new_smi->interrupt_disabled = 0;
2724         atomic_set(&new_smi->stop_operation, 0);
2725         new_smi->intf_num = smi_num;
2726         smi_num++;
2727
2728         /* Start clearing the flags before we enable interrupts or the
2729            timer to avoid racing with the timer. */
2730         start_clear_flags(new_smi);
2731         /* IRQ is defined to be set when non-zero. */
2732         if (new_smi->irq)
2733                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2734
2735         if (!new_smi->dev) {
2736                 /* If we don't already have a device from something
2737                  * else (like PCI), then register a new one. */
2738                 new_smi->pdev = platform_device_alloc("ipmi_si",
2739                                                       new_smi->intf_num);
2740                 if (!new_smi->pdev) {
2741                         rv = -ENOMEM;
2742                         printk(KERN_ERR "ipmi_si_intf:"
2743                                " Unable to allocate platform device\n");
2744                         goto out_err;
2745                 }
2746                 new_smi->dev = &new_smi->pdev->dev;
2747                 new_smi->dev->driver = &ipmi_driver;
2748
2749                 rv = platform_device_add(new_smi->pdev);
2750                 if (rv) {
2751                         printk(KERN_ERR
2752                                "ipmi_si_intf:"
2753                                " Unable to register system interface device:"
2754                                " %d\n",
2755                                rv);
2756                         goto out_err;
2757                 }
2758                 new_smi->dev_registered = 1;
2759         }
2760
2761         rv = ipmi_register_smi(&handlers,
2762                                new_smi,
2763                                &new_smi->device_id,
2764                                new_smi->dev,
2765                                "bmc",
2766                                new_smi->slave_addr);
2767         if (rv) {
2768                 printk(KERN_ERR
2769                        "ipmi_si: Unable to register device: error %d\n",
2770                        rv);
2771                 goto out_err_stop_timer;
2772         }
2773
2774         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2775                                      type_file_read_proc, NULL,
2776                                      new_smi, THIS_MODULE);
2777         if (rv) {
2778                 printk(KERN_ERR
2779                        "ipmi_si: Unable to create proc entry: %d\n",
2780                        rv);
2781                 goto out_err_stop_timer;
2782         }
2783
2784         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2785                                      stat_file_read_proc, NULL,
2786                                      new_smi, THIS_MODULE);
2787         if (rv) {
2788                 printk(KERN_ERR
2789                        "ipmi_si: Unable to create proc entry: %d\n",
2790                        rv);
2791                 goto out_err_stop_timer;
2792         }
2793
2794         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2795                                      param_read_proc, NULL,
2796                                      new_smi, THIS_MODULE);
2797         if (rv) {
2798                 printk(KERN_ERR
2799                        "ipmi_si: Unable to create proc entry: %d\n",
2800                        rv);
2801                 goto out_err_stop_timer;
2802         }
2803
2804         list_add_tail(&new_smi->link, &smi_infos);
2805
2806         mutex_unlock(&smi_infos_lock);
2807
2808         printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2809
2810         return 0;
2811
2812  out_err_stop_timer:
2813         atomic_inc(&new_smi->stop_operation);
2814         wait_for_timer_and_thread(new_smi);
2815
2816  out_err:
2817         if (new_smi->intf)
2818                 ipmi_unregister_smi(new_smi->intf);
2819
2820         if (new_smi->irq_cleanup)
2821                 new_smi->irq_cleanup(new_smi);
2822
2823         /* Wait until we know that we are out of any interrupt
2824            handlers that might have been running before we freed the
2825            interrupt. */
2826         synchronize_sched();
2827
2828         if (new_smi->si_sm) {
2829                 if (new_smi->handlers)
2830                         new_smi->handlers->cleanup(new_smi->si_sm);
2831                 kfree(new_smi->si_sm);
2832         }
2833         if (new_smi->addr_source_cleanup)
2834                 new_smi->addr_source_cleanup(new_smi);
2835         if (new_smi->io_cleanup)
2836                 new_smi->io_cleanup(new_smi);
2837
2838         if (new_smi->dev_registered)
2839                 platform_device_unregister(new_smi->pdev);
2840
2841         kfree(new_smi);
2842
2843         mutex_unlock(&smi_infos_lock);
2844
2845         return rv;
2846 }
2847
2848 static __devinit int init_ipmi_si(void)
2849 {
2850         int  i;
2851         char *str;
2852         int  rv;
2853
2854         if (initialized)
2855                 return 0;
2856         initialized = 1;
2857
2858         /* Register the device drivers. */
2859         rv = driver_register(&ipmi_driver);
2860         if (rv) {
2861                 printk(KERN_ERR
2862                        "init_ipmi_si: Unable to register driver: %d\n",
2863                        rv);
2864                 return rv;
2865         }
2866
2867
2868         /* Parse out the si_type string into its components. */
2869         str = si_type_str;
2870         if (*str != '\0') {
2871                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2872                         si_type[i] = str;
2873                         str = strchr(str, ',');
2874                         if (str) {
2875                                 *str = '\0';
2876                                 str++;
2877                         } else {
2878                                 break;
2879                         }
2880                 }
2881         }
2882
2883         printk(KERN_INFO "IPMI System Interface driver.\n");
2884
2885         hardcode_find_bmc();
2886
2887 #ifdef CONFIG_DMI
2888         dmi_find_bmc();
2889 #endif
2890
2891 #ifdef CONFIG_ACPI
2892         acpi_find_bmc();
2893 #endif
2894
2895 #ifdef CONFIG_PCI
2896         rv = pci_register_driver(&ipmi_pci_driver);
2897         if (rv) {
2898                 printk(KERN_ERR
2899                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2900                        rv);
2901         }
2902 #endif
2903
2904 #ifdef CONFIG_PPC_OF
2905         of_register_platform_driver(&ipmi_of_platform_driver);
2906 #endif
2907
2908         if (si_trydefaults) {
2909                 mutex_lock(&smi_infos_lock);
2910                 if (list_empty(&smi_infos)) {
2911                         /* No BMC was found, try defaults. */
2912                         mutex_unlock(&smi_infos_lock);
2913                         default_find_bmc();
2914                 } else {
2915                         mutex_unlock(&smi_infos_lock);
2916                 }
2917         }
2918
2919         mutex_lock(&smi_infos_lock);
2920         if (unload_when_empty && list_empty(&smi_infos)) {
2921                 mutex_unlock(&smi_infos_lock);
2922 #ifdef CONFIG_PCI
2923                 pci_unregister_driver(&ipmi_pci_driver);
2924 #endif
#ifdef CONFIG_PPC_OF
                of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
2925                 driver_unregister(&ipmi_driver);
2926                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2927                 return -ENODEV;
2928         } else {
2929                 mutex_unlock(&smi_infos_lock);
2930                 return 0;
2931         }
2932 }
2933 module_init(init_ipmi_si);
2934
2935 static void cleanup_one_si(struct smi_info *to_clean)
2936 {
2937         int           rv;
2938         unsigned long flags;
2939
2940         if (!to_clean)
2941                 return;
2942
2943         list_del(&to_clean->link);
2944
2945         /* Tell the timer and interrupt handlers that we are shutting
2946            down. */
2947         spin_lock_irqsave(&(to_clean->si_lock), flags);
2948         spin_lock(&(to_clean->msg_lock));
2949
2950         atomic_inc(&to_clean->stop_operation);
2951
2952         if (to_clean->irq_cleanup)
2953                 to_clean->irq_cleanup(to_clean);
2954
2955         spin_unlock(&(to_clean->msg_lock));
2956         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2957
2958         /* Wait until we know that we are out of any interrupt
2959            handlers that might have been running before we freed the
2960            interrupt. */
2961         synchronize_sched();
2962
2963         wait_for_timer_and_thread(to_clean);
2964
2965         /* Interrupts and timeouts are stopped, now make sure the
2966            interface is in a clean state. */
2967         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2968                 poll(to_clean);
2969                 schedule_timeout_uninterruptible(1);
2970         }
2971
2972         rv = ipmi_unregister_smi(to_clean->intf);
2973         if (rv) {
2974                 printk(KERN_ERR
2975                        "ipmi_si: Unable to unregister device: errno=%d\n",
2976                        rv);
2977         }
2978
2979         to_clean->handlers->cleanup(to_clean->si_sm);
2980
2981         kfree(to_clean->si_sm);
2982
2983         if (to_clean->addr_source_cleanup)
2984                 to_clean->addr_source_cleanup(to_clean);
2985         if (to_clean->io_cleanup)
2986                 to_clean->io_cleanup(to_clean);
2987
2988         if (to_clean->dev_registered)
2989                 platform_device_unregister(to_clean->pdev);
2990
2991         kfree(to_clean);
2992 }
2993
2994 static __exit void cleanup_ipmi_si(void)
2995 {
2996         struct smi_info *e, *tmp_e;
2997
2998         if (!initialized)
2999                 return;
3000
3001 #ifdef CONFIG_PCI
3002         pci_unregister_driver(&ipmi_pci_driver);
3003 #endif
3004
3005 #ifdef CONFIG_PPC_OF
3006         of_unregister_platform_driver(&ipmi_of_platform_driver);
3007 #endif
3008
3009         mutex_lock(&smi_infos_lock);
3010         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3011                 cleanup_one_si(e);
3012         mutex_unlock(&smi_infos_lock);
3013
3014         driver_unregister(&ipmi_driver);
3015 }
3016 module_exit(cleanup_ipmi_si);
3017
3018 MODULE_LICENSE("GPL");
3019 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3020 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");