4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
58 #ifdef CONFIG_HIGH_RES_TIMERS
59 #include <linux/hrtime.h>
60 # if defined(schedule_next_int)
61 /* Old high-res timer code, do translations. */
62 # define get_arch_cycles(a) quick_update_jiffies_sub(a)
63 # define arch_cycles_per_jiffy cycles_per_jiffies
/* Advance a timer's sub-jiffy expiry by 'v' microseconds, normalizing
   arch_cycle_expires back into the per-jiffy range.  Only compiled for
   the old high-res timer API.
   NOTE(review): this extract is missing lines here (braces, and
   presumably an increment of t->expires inside the while loop) --
   confirm against the upstream file. */
65 static inline void add_usec_to_timer(struct timer_list *t, long v)
67 t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
68 while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
71 t->arch_cycle_expires -= arch_cycles_per_jiffy;
75 #include <linux/interrupt.h>
76 #include <linux/rcupdate.h>
77 #include <linux/ipmi_smi.h>
79 #include "ipmi_si_sm.h"
80 #include <linux/init.h>
81 #include <linux/dmi.h>
83 /* Measure times between events in the driver. */
/* Timer cadence: the long (normal) timeout is 10ms, the short timeout
   (used when the state machine asks for a quick re-poll) is 250us. */
86 /* Call every 10 ms. */
87 #define SI_TIMEOUT_TIME_USEC 10000
88 #define SI_USEC_PER_JIFFY (1000000/HZ)
89 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
90 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
/* States of the SI transaction state machine (enum si_intf_state).
   NOTE(review): the opening of the enum and several members are
   missing from this extract. */
98 SI_CLEARING_FLAGS_THEN_SET_IRQ,
100 SI_ENABLE_INTERRUPTS1,
101 SI_ENABLE_INTERRUPTS2
102 /* FIXME - add watchdog stuff. */
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG 2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
/* Hardware interface types (enum si_type) and their printable names;
   si_to_str[] is indexed by the enum value. */
111 SI_KCS, SI_SMIC, SI_BT
113 static char *si_to_str[] = { "KCS", "SMIC", "BT" };
115 #define DEVICE_NAME "ipmi_si"
/* Driver-model handle for this driver; registered on the platform bus. */
117 static struct device_driver ipmi_driver =
120 .bus = &platform_bus_type
/* Per-interface state (struct smi_info; NOTE(review): the struct
   header line is missing from this extract).  One instance exists for
   each detected SI interface. */
/* Low-level state machine instance and its ops table. */
127 struct si_sm_data *si_sm;
128 struct si_sm_handlers *handlers;
129 enum si_type si_type;
/* Outgoing message queues: normal and high-priority, plus the message
   currently being processed by the state machine. */
132 struct list_head xmit_msgs;
133 struct list_head hp_xmit_msgs;
134 struct ipmi_smi_msg *curr_msg;
135 enum si_intf_state si_state;
137 /* Used to handle the various types of I/O that can occur with
140 int (*io_setup)(struct smi_info *info);
141 void (*io_cleanup)(struct smi_info *info);
142 int (*irq_setup)(struct smi_info *info);
143 void (*irq_cleanup)(struct smi_info *info);
144 unsigned int io_size;
145 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146 void (*addr_source_cleanup)(struct smi_info *info);
147 void *addr_source_data;
149 /* Per-OEM handler, called from handle_flags().
150 Returns 1 when handle_flags() needs to be re-run
151 or 0 indicating it set si_state itself.
153 int (*oem_data_avail_handler)(struct smi_info *smi_info);
155 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156 is set to hold the flags until we are done handling everything
/* Bit values returned in the GET_MSG_FLAGS response byte. */
158 #define RECEIVE_MSG_AVAIL 0x01
159 #define EVENT_MSG_BUFFER_FULL 0x02
160 #define WDT_PRE_TIMEOUT_INT 0x08
161 #define OEM0_DATA_AVAIL 0x20
162 #define OEM1_DATA_AVAIL 0x40
163 #define OEM2_DATA_AVAIL 0x80
164 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
167 unsigned char msg_flags;
169 /* If set to true, this will request events the next time the
170 state machine is idle. */
173 /* If true, run the state machine to completion on every send
174 call. Generally used after a panic to make sure stuff goes
176 int run_to_completion;
178 /* The I/O port of an SI interface. */
181 /* The space between start addresses of the two ports. For
182 instance, if the first port is 0xca2 and the spacing is 4, then
183 the second port is 0xca6. */
184 unsigned int spacing;
186 /* zero if no irq; */
189 /* The timer for this si. */
190 struct timer_list si_timer;
192 /* The time (in jiffies) the last timeout occurred at. */
193 unsigned long last_timeout_jiffies;
195 /* Used to gracefully stop the timer without race conditions. */
196 atomic_t stop_operation;
198 /* The driver will disable interrupts when it gets into a
199 situation where it cannot handle messages due to lack of
200 memory. Once that situation clears up, it will re-enable
202 int interrupt_disabled;
204 /* From the get device id response... */
205 struct ipmi_device_id device_id;
207 /* Driver model stuff. */
209 struct platform_device *pdev;
211 /* True if we allocated the device, false if it came from
212 * someplace else (like PCI). */
215 /* Slave address, could be reported from DMI. */
216 unsigned char slave_addr;
218 /* Counters and things for the proc filesystem. */
219 spinlock_t count_lock;
220 unsigned long short_timeouts;
221 unsigned long long_timeouts;
222 unsigned long timeout_restarts;
224 unsigned long interrupts;
225 unsigned long attentions;
226 unsigned long flag_fetches;
227 unsigned long hosed_count;
228 unsigned long complete_transactions;
229 unsigned long events;
230 unsigned long watchdog_pretimeouts;
231 unsigned long incoming_messages;
/* Polling kthread (used when no IRQ is available). */
233 struct task_struct *thread;
/* Membership in the global smi_infos list. */
235 struct list_head link;
238 static int try_smi_init(struct smi_info *smi);
/* Chain of notifiers invoked at the start of each transaction
   (old-style unlocked notifier chain). */
240 static struct notifier_block *xaction_notifier_list;
/* Add 'nb' to the transaction-start notifier chain; returns the
   notifier_chain_register() result. */
241 static int register_xaction_notifier(struct notifier_block * nb)
243 return notifier_chain_register(&xaction_notifier_list, nb);
246 static void si_restart_short_timer(struct smi_info *smi_info);
248 static void deliver_recv_msg(struct smi_info *smi_info,
249 struct ipmi_smi_msg *msg)
251 /* Deliver the message to the upper layer with the lock
253 spin_unlock(&(smi_info->si_lock));
254 ipmi_smi_msg_received(smi_info->intf, msg);
255 spin_lock(&(smi_info->si_lock));
/* Fail the in-flight request back to the upper layer as an error
   response: the response netfn is the request netfn with bit 2 set
   (| 4), and completion code 0xFF means "unspecified error".
   NOTE(review): the line setting msg->rsp_size appears to be missing
   from this extract. */
258 static void return_hosed_msg(struct smi_info *smi_info)
260 struct ipmi_smi_msg *msg = smi_info->curr_msg;
262 /* Make it a response */
263 msg->rsp[0] = msg->data[0] | 4;
264 msg->rsp[1] = msg->data[1];
265 msg->rsp[2] = 0xFF; /* Unknown error. */
268 smi_info->curr_msg = NULL;
269 deliver_recv_msg(smi_info, msg);
/* Dequeue the next outgoing message (high-priority queue first) and
   start it on the state machine.  Called with interrupts off and the
   SI lock held; returns SI_SM_CALL_WITHOUT_DELAY when a transaction
   was started.  NOTE(review): several lines (the empty-queue branch,
   error check after start_transaction, return) are missing from this
   extract. */
272 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
275 struct list_head *entry = NULL;
280 /* No need to save flags, we already have interrupts off and we
281 already hold the SMI lock. */
282 spin_lock(&(smi_info->msg_lock));
284 /* Pick the high priority queue first. */
285 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
286 entry = smi_info->hp_xmit_msgs.next;
287 } else if (!list_empty(&(smi_info->xmit_msgs))) {
288 entry = smi_info->xmit_msgs.next;
292 smi_info->curr_msg = NULL;
298 smi_info->curr_msg = list_entry(entry,
303 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
/* Give notifier users (e.g. the watchdog) a chance to veto/observe
   the transaction start. */
305 err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
306 if (err & NOTIFY_STOP_MASK) {
307 rv = SI_SM_CALL_WITHOUT_DELAY;
310 err = smi_info->handlers->start_transaction(
312 smi_info->curr_msg->data,
313 smi_info->curr_msg->data_size);
315 return_hosed_msg(smi_info);
318 rv = SI_SM_CALL_WITHOUT_DELAY;
321 spin_unlock(&(smi_info->msg_lock));
326 static void start_enable_irq(struct smi_info *smi_info)
328 unsigned char msg[2];
330 /* If we are enabling interrupts, we have to tell the
332 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
333 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
335 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
336 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
339 static void start_clear_flags(struct smi_info *smi_info)
341 unsigned char msg[3];
343 /* Make sure the watchdog pre-timeout flag is not set at startup. */
344 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
345 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
346 msg[2] = WDT_PRE_TIMEOUT_INT;
348 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
349 smi_info->si_state = SI_CLEARING_FLAGS;
352 /* When we have a situtaion where we run out of memory and cannot
353 allocate messages, we just leave them in the BMC and run the system
354 polled until we can allocate some memory. Once we have some
355 memory, we will re-enable the interrupt. */
356 static inline void disable_si_irq(struct smi_info *smi_info)
358 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
359 disable_irq_nosync(smi_info->irq);
360 smi_info->interrupt_disabled = 1;
364 static inline void enable_si_irq(struct smi_info *smi_info)
366 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
367 enable_irq(smi_info->irq);
368 smi_info->interrupt_disabled = 0;
/* Act on the message flags fetched from the BMC (smi_info->msg_flags),
   in priority order: watchdog pre-timeout, received messages, event
   buffer full, then OEM-specific flags.  Called with the SI lock held.
   NOTE(review): this extract is missing lines (returns after the
   allocation-failure branches, closing braces). */
372 static void handle_flags(struct smi_info *smi_info)
375 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
376 /* Watchdog pre-timeout */
377 spin_lock(&smi_info->count_lock);
378 smi_info->watchdog_pretimeouts++;
379 spin_unlock(&smi_info->count_lock);
381 start_clear_flags(smi_info);
382 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
/* The pretimeout callback may block/take other locks, so drop the
   SI lock around it. */
383 spin_unlock(&(smi_info->si_lock));
384 ipmi_smi_watchdog_pretimeout(smi_info->intf);
385 spin_lock(&(smi_info->si_lock));
386 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
387 /* Messages available. */
388 smi_info->curr_msg = ipmi_alloc_smi_msg();
389 if (!smi_info->curr_msg) {
/* No memory: go polled until allocation succeeds again. */
390 disable_si_irq(smi_info);
391 smi_info->si_state = SI_NORMAL;
394 enable_si_irq(smi_info);
396 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
397 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
398 smi_info->curr_msg->data_size = 2;
400 smi_info->handlers->start_transaction(
402 smi_info->curr_msg->data,
403 smi_info->curr_msg->data_size);
404 smi_info->si_state = SI_GETTING_MESSAGES;
405 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
406 /* Events available. */
407 smi_info->curr_msg = ipmi_alloc_smi_msg();
408 if (!smi_info->curr_msg) {
409 disable_si_irq(smi_info);
410 smi_info->si_state = SI_NORMAL;
413 enable_si_irq(smi_info);
415 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
416 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
417 smi_info->curr_msg->data_size = 2;
419 smi_info->handlers->start_transaction(
421 smi_info->curr_msg->data,
422 smi_info->curr_msg->data_size);
423 smi_info->si_state = SI_GETTING_EVENTS;
424 } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
/* Per-OEM hook; returns nonzero when handle_flags() must re-run. */
425 if (smi_info->oem_data_avail_handler)
426 if (smi_info->oem_data_avail_handler(smi_info))
429 smi_info->si_state = SI_NORMAL;
/* Dispatch on si_state when the state machine reports a completed
   transaction: collect the result and either deliver it upward or
   start the next step of a multi-step sequence (flag fetch, flag
   clear, message/event read, interrupt enable).
   NOTE(review): this extract is missing lines throughout (case
   labels, printk arguments, closing braces). */
433 static void handle_transaction_done(struct smi_info *smi_info)
435 struct ipmi_smi_msg *msg;
440 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
442 switch (smi_info->si_state) {
444 if (!smi_info->curr_msg)
447 smi_info->curr_msg->rsp_size
448 = smi_info->handlers->get_result(
450 smi_info->curr_msg->rsp,
451 IPMI_MAX_MSG_LENGTH);
453 /* Do this here because deliver_recv_msg() releases the
454 lock, and a new message can be put in during the
455 time the lock is released. */
456 msg = smi_info->curr_msg;
457 smi_info->curr_msg = NULL;
458 deliver_recv_msg(smi_info, msg);
461 case SI_GETTING_FLAGS:
463 unsigned char msg[4];
466 /* We got the flags from the SMI, now handle them. */
467 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
469 /* Error fetching flags, just give up for
471 smi_info->si_state = SI_NORMAL;
472 } else if (len < 4) {
473 /* Hmm, no flags. That's technically illegal, but
474 don't use uninitialized data. */
475 smi_info->si_state = SI_NORMAL;
477 smi_info->msg_flags = msg[3];
478 handle_flags(smi_info);
483 case SI_CLEARING_FLAGS:
484 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
486 unsigned char msg[3];
488 /* We cleared the flags. */
489 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
491 /* Error clearing flags */
493 "ipmi_si: Error clearing flags: %2.2x\n",
496 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
497 start_enable_irq(smi_info);
499 smi_info->si_state = SI_NORMAL;
503 case SI_GETTING_EVENTS:
505 smi_info->curr_msg->rsp_size
506 = smi_info->handlers->get_result(
508 smi_info->curr_msg->rsp,
509 IPMI_MAX_MSG_LENGTH);
511 /* Do this here because deliver_recv_msg() releases the
512 lock, and a new message can be put in during the
513 time the lock is released. */
514 msg = smi_info->curr_msg;
515 smi_info->curr_msg = NULL;
516 if (msg->rsp[2] != 0) {
517 /* Error getting event, probably done. */
520 /* Take off the event flag. */
521 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
522 handle_flags(smi_info);
524 spin_lock(&smi_info->count_lock);
526 spin_unlock(&smi_info->count_lock);
528 /* Do this before we deliver the message
529 because delivering the message releases the
530 lock and something else can mess with the
532 handle_flags(smi_info);
534 deliver_recv_msg(smi_info, msg);
539 case SI_GETTING_MESSAGES:
541 smi_info->curr_msg->rsp_size
542 = smi_info->handlers->get_result(
544 smi_info->curr_msg->rsp,
545 IPMI_MAX_MSG_LENGTH);
547 /* Do this here because deliver_recv_msg() releases the
548 lock, and a new message can be put in during the
549 time the lock is released. */
550 msg = smi_info->curr_msg;
551 smi_info->curr_msg = NULL;
552 if (msg->rsp[2] != 0) {
553 /* Error getting event, probably done. */
556 /* Take off the msg flag. */
557 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
558 handle_flags(smi_info);
560 spin_lock(&smi_info->count_lock);
561 smi_info->incoming_messages++;
562 spin_unlock(&smi_info->count_lock);
564 /* Do this before we deliver the message
565 because delivering the message releases the
566 lock and something else can mess with the
568 handle_flags(smi_info);
570 deliver_recv_msg(smi_info, msg);
/* Step 1 of interrupt enabling: got the GET_BMC_GLOBAL_ENABLES
   response; set the msg-queue-interrupt bit and write it back. */
575 case SI_ENABLE_INTERRUPTS1:
577 unsigned char msg[4];
579 /* We got the flags from the SMI, now handle them. */
580 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
583 "ipmi_si: Could not enable interrupts"
584 ", failed get, using polled mode.\n");
585 smi_info->si_state = SI_NORMAL;
587 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
588 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
589 msg[2] = msg[3] | 1; /* enable msg queue int */
590 smi_info->handlers->start_transaction(
591 smi_info->si_sm, msg, 3);
592 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
/* Step 2: check the SET_BMC_GLOBAL_ENABLES response. */
597 case SI_ENABLE_INTERRUPTS2:
599 unsigned char msg[4];
601 /* We got the flags from the SMI, now handle them. */
602 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
605 "ipmi_si: Could not enable interrupts"
606 ", failed set, using polled mode.\n");
608 smi_info->si_state = SI_NORMAL;
614 /* Called on timeouts and events. Timeouts should pass the elapsed
615 time, interrupts should pass in zero. */
/* Core driving loop: run the low-level state machine, handle
   completed/hosed transactions, react to ATTN, start queued messages
   when idle, and fetch flags when the upper layer requested events.
   Returns the last state-machine result.  Called with the SI lock
   held.  NOTE(review): this extract is missing lines (braces, the
   'time' parameter declaration, goto/return paths). */
616 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
619 enum si_sm_result si_sm_result;
622 /* There used to be a loop here that waited a little while
623 (around 25us) before giving up. That turned out to be
624 pointless, the minimum delays I was seeing were in the 300us
625 range, which is far too long to wait in an interrupt. So
626 we just run until the state machine tells us something
627 happened or it needs a delay. */
628 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time)
630 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
632 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
635 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
637 spin_lock(&smi_info->count_lock);
638 smi_info->complete_transactions++;
639 spin_unlock(&smi_info->count_lock);
641 handle_transaction_done(smi_info);
642 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
644 else if (si_sm_result == SI_SM_HOSED)
646 spin_lock(&smi_info->count_lock);
647 smi_info->hosed_count++;
648 spin_unlock(&smi_info->count_lock);
650 /* Do the before return_hosed_msg, because that
651 releases the lock. */
652 smi_info->si_state = SI_NORMAL;
653 if (smi_info->curr_msg != NULL) {
654 /* If we were handling a user message, format
655 a response to send to the upper layer to
656 tell it about the error. */
657 return_hosed_msg(smi_info);
659 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
662 /* We prefer handling attn over new messages. */
663 if (si_sm_result == SI_SM_ATTN)
665 unsigned char msg[2];
667 spin_lock(&smi_info->count_lock);
668 smi_info->attentions++;
669 spin_unlock(&smi_info->count_lock);
671 /* Got an attn, send down a get message flags to see
672 what's causing it. It would be better to handle
673 this in the upper layer, but due to the way
674 interrupts work with the SMI, that's not really
676 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
677 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
679 smi_info->handlers->start_transaction(
680 smi_info->si_sm, msg, 2);
681 smi_info->si_state = SI_GETTING_FLAGS;
685 /* If we are currently idle, try to start the next message. */
686 if (si_sm_result == SI_SM_IDLE) {
687 spin_lock(&smi_info->count_lock);
689 spin_unlock(&smi_info->count_lock);
691 si_sm_result = start_next_msg(smi_info);
692 if (si_sm_result != SI_SM_IDLE)
696 if ((si_sm_result == SI_SM_IDLE)
697 && (atomic_read(&smi_info->req_events)))
699 /* We are idle and the upper layer requested that I fetch
701 unsigned char msg[2];
703 spin_lock(&smi_info->count_lock);
704 smi_info->flag_fetches++;
705 spin_unlock(&smi_info->count_lock);
707 atomic_set(&smi_info->req_events, 0);
708 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
709 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
711 smi_info->handlers->start_transaction(
712 smi_info->si_sm, msg, 2);
713 smi_info->si_state = SI_GETTING_FLAGS;
/* Upper-layer send entry point: queue 'msg' (high-priority queue when
   requested) and kick the state machine.  In run-to-completion mode
   (panic path) the message is processed synchronously by busy-polling
   the state machine.  NOTE(review): this extract is missing lines
   (the priority parameter, returns, braces). */
720 static void sender(void *send_info,
721 struct ipmi_smi_msg *msg,
724 struct smi_info *smi_info = send_info;
725 enum si_sm_result result;
731 spin_lock_irqsave(&(smi_info->msg_lock), flags);
734 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
737 if (smi_info->run_to_completion) {
738 /* If we are running to completion, then throw it in
739 the list and run transactions until everything is
740 clear. Priority doesn't matter here. */
741 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
743 /* We have to release the msg lock and claim the smi
744 lock in this case, because of race conditions. */
745 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
747 spin_lock_irqsave(&(smi_info->si_lock), flags);
748 result = smi_event_handler(smi_info, 0);
749 while (result != SI_SM_IDLE) {
750 udelay(SI_SHORT_TIMEOUT_USEC);
751 result = smi_event_handler(smi_info,
752 SI_SHORT_TIMEOUT_USEC);
754 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
758 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
760 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
763 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
/* Start it immediately if the interface is idle. */
765 spin_lock_irqsave(&(smi_info->si_lock), flags);
766 if ((smi_info->si_state == SI_NORMAL)
767 && (smi_info->curr_msg == NULL))
769 start_next_msg(smi_info);
770 si_restart_short_timer(smi_info);
772 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* Switch the interface into (or out of) polled run-to-completion mode
   (used after a panic).  When turning it on, drain the state machine
   by busy-polling until it reports idle.
   NOTE(review): the 'flags' declaration and closing braces are
   missing from this extract. */
775 static void set_run_to_completion(void *send_info, int i_run_to_completion)
777 struct smi_info *smi_info = send_info;
778 enum si_sm_result result;
781 spin_lock_irqsave(&(smi_info->si_lock), flags);
783 smi_info->run_to_completion = i_run_to_completion;
784 if (i_run_to_completion) {
785 result = smi_event_handler(smi_info, 0);
786 while (result != SI_SM_IDLE) {
787 udelay(SI_SHORT_TIMEOUT_USEC);
788 result = smi_event_handler(smi_info,
789 SI_SHORT_TIMEOUT_USEC);
793 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* Polling kthread: repeatedly runs the state machine at the lowest
   scheduling priority, yielding (or sleeping one jiffy) according to
   what the state machine asks for.  NOTE(review): the
   CALL_WITHOUT_DELAY/else branch bodies and the return are missing
   from this extract. */
796 static int ipmi_thread(void *data)
798 struct smi_info *smi_info = data;
800 enum si_sm_result smi_result;
802 set_user_nice(current, 19);
803 while (!kthread_should_stop()) {
804 spin_lock_irqsave(&(smi_info->si_lock), flags);
805 smi_result=smi_event_handler(smi_info, 0);
806 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
807 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
810 else if (smi_result == SI_SM_CALL_WITH_DELAY)
813 schedule_timeout_interruptible(1);
/* Upper-layer poll hook: give the state machine one pass with zero
 * elapsed time. */
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;

	smi_event_handler(smi_info, 0);
}
826 static void request_events(void *send_info)
828 struct smi_info *smi_info = send_info;
830 atomic_set(&smi_info->req_events, 1);
/* Nonzero once module init has completed. */
833 static int initialized = 0;
835 /* Must be called with interrupts off and with the si_lock held. */
/* High-res-timer builds only: re-arm si_timer to fire a short timeout
   (SI_SHORT_TIMEOUT_USEC) from now.  Only acts when del_timer()
   succeeds -- otherwise the timer is already about to fire anyway.
   The xtime_lock seqlock guards a consistent jiffies/arch-cycles
   snapshot.  NOTE(review): the do {, closing braces and #else/#endif
   lines are missing from this extract. */
836 static void si_restart_short_timer(struct smi_info *smi_info)
838 #if defined(CONFIG_HIGH_RES_TIMERS)
840 unsigned long jiffies_now;
843 if (del_timer(&(smi_info->si_timer))) {
844 /* If we don't delete the timer, then it will go off
845 immediately, anyway. So we only process if we
846 actually delete the timer. */
849 seq = read_seqbegin_irqsave(&xtime_lock, flags);
850 jiffies_now = jiffies;
851 smi_info->si_timer.expires = jiffies_now;
852 smi_info->si_timer.arch_cycle_expires
853 = get_arch_cycles(jiffies_now);
854 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
856 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
858 add_timer(&(smi_info->si_timer));
859 spin_lock_irqsave(&smi_info->count_lock, flags);
860 smi_info->timeout_restarts++;
861 spin_unlock_irqrestore(&smi_info->count_lock, flags);
/* si_timer callback: run the state machine with the real elapsed time
   since the last tick, then re-arm the timer -- long timeout
   (SI_TIMEOUT_JIFFIES) normally, short timeout when the state machine
   asked for a quick re-poll.  NOTE(review): this extract is missing
   lines (goto labels, do {, #else branches, braces). */
866 static void smi_timeout(unsigned long data)
868 struct smi_info *smi_info = (struct smi_info *) data;
869 enum si_sm_result smi_result;
871 unsigned long jiffies_now;
/* Shutting down: do not touch the hardware or re-arm. */
877 if (atomic_read(&smi_info->stop_operation))
880 spin_lock_irqsave(&(smi_info->si_lock), flags);
883 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
885 jiffies_now = jiffies;
886 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
887 * SI_USEC_PER_JIFFY);
888 smi_result = smi_event_handler(smi_info, time_diff);
890 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
892 smi_info->last_timeout_jiffies = jiffies_now;
894 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
895 /* Running with interrupts, only do long timeouts. */
896 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
897 spin_lock_irqsave(&smi_info->count_lock, flags);
898 smi_info->long_timeouts++;
899 spin_unlock_irqrestore(&smi_info->count_lock, flags);
903 /* If the state machine asks for a short delay, then shorten
904 the timer timeout. */
905 if (smi_result == SI_SM_CALL_WITH_DELAY) {
906 #if defined(CONFIG_HIGH_RES_TIMERS)
909 spin_lock_irqsave(&smi_info->count_lock, flags);
910 smi_info->short_timeouts++;
911 spin_unlock_irqrestore(&smi_info->count_lock, flags);
912 #if defined(CONFIG_HIGH_RES_TIMERS)
914 seq = read_seqbegin_irqsave(&xtime_lock, flags);
915 smi_info->si_timer.expires = jiffies;
916 smi_info->si_timer.arch_cycle_expires
917 = get_arch_cycles(smi_info->si_timer.expires);
918 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
919 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
/* Non-high-res fallback: next jiffy. */
921 smi_info->si_timer.expires = jiffies + 1;
924 spin_lock_irqsave(&smi_info->count_lock, flags);
925 smi_info->long_timeouts++;
926 spin_unlock_irqrestore(&smi_info->count_lock, flags);
927 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
928 #if defined(CONFIG_HIGH_RES_TIMERS)
929 smi_info->si_timer.arch_cycle_expires = 0;
934 add_timer(&(smi_info->si_timer));
/* Interrupt handler shared by all SI types: count the interrupt and
   run the state machine (unless shutting down), all under the SI
   lock.  NOTE(review): the goto target and 'return IRQ_HANDLED;' are
   missing from this extract. */
937 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
939 struct smi_info *smi_info = data;
945 spin_lock_irqsave(&(smi_info->si_lock), flags);
947 spin_lock(&smi_info->count_lock);
948 smi_info->interrupts++;
949 spin_unlock(&smi_info->count_lock);
951 if (atomic_read(&smi_info->stop_operation))
956 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
958 smi_event_handler(smi_info, 0);
960 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
964 static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
966 struct smi_info *smi_info = data;
967 /* We need to clear the IRQ flag for the BT interface. */
968 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
969 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
970 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
971 return si_irq_handler(irq, data, regs);
/* Ops table handed to the IPMI message handler for this interface
   type.  NOTE(review): several members (.sender, .poll, ...) are
   missing from this extract. */
975 static struct ipmi_smi_handlers handlers =
977 .owner = THIS_MODULE,
979 .request_events = request_events,
980 .set_run_to_completion = set_run_to_completion,
984 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
985 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
987 #define SI_MAX_PARMS 4
/* Global list of discovered interfaces and its lock. */
988 static LIST_HEAD(smi_infos);
989 static DECLARE_MUTEX(smi_infos_lock);
990 static int smi_num; /* Used to sequence the SMIs */
992 #define DEFAULT_REGSPACING 1
/* Module-parameter backing storage; each array is indexed by
   interface number, with a matching element count. */
994 static int si_trydefaults = 1;
995 static char *si_type[SI_MAX_PARMS];
996 #define MAX_SI_TYPE_STR 30
997 static char si_type_str[MAX_SI_TYPE_STR];
998 static unsigned long addrs[SI_MAX_PARMS];
999 static int num_addrs;
1000 static unsigned int ports[SI_MAX_PARMS];
1001 static int num_ports;
1002 static int irqs[SI_MAX_PARMS];
1003 static int num_irqs;
1004 static int regspacings[SI_MAX_PARMS];
1005 static int num_regspacings = 0;
1006 static int regsizes[SI_MAX_PARMS];
1007 static int num_regsizes = 0;
1008 static int regshifts[SI_MAX_PARMS];
1009 static int num_regshifts = 0;
1010 static int slave_addrs[SI_MAX_PARMS];
1011 static int num_slave_addrs = 0;
/* Module parameter declarations and help text.  NOTE(review): some
   description-string continuation lines are missing from this
   extract. */
1014 module_param_named(trydefaults, si_trydefaults, bool, 0);
1015 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1016 " default scan of the KCS and SMIC interface at the standard"
1018 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1019 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1020 " interface separated by commas. The types are 'kcs',"
1021 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1022 " the first interface to kcs and the second to bt");
1023 module_param_array(addrs, long, &num_addrs, 0);
1024 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1025 " addresses separated by commas. Only use if an interface"
1026 " is in memory. Otherwise, set it to zero or leave"
1028 module_param_array(ports, int, &num_ports, 0);
1029 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1030 " addresses separated by commas. Only use if an interface"
1031 " is a port. Otherwise, set it to zero or leave"
1033 module_param_array(irqs, int, &num_irqs, 0);
1034 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1035 " addresses separated by commas. Only use if an interface"
1036 " has an interrupt. Otherwise, set it to zero or leave"
1038 module_param_array(regspacings, int, &num_regspacings, 0);
1039 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1040 " and each successive register used by the interface. For"
1041 " instance, if the start address is 0xca2 and the spacing"
1042 " is 2, then the second address is at 0xca4. Defaults"
1044 module_param_array(regsizes, int, &num_regsizes, 0);
1045 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1046 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1047 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1048 " the 8-bit IPMI register has to be read from a larger"
1050 module_param_array(regshifts, int, &num_regshifts, 0);
1051 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1052 " IPMI register, in bits. For instance, if the data"
1053 " is read from a 32-bit word and the IPMI data is in"
1054 " bit 8-15, then the shift would be 8");
1055 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1056 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1057 " the controller. Normally this is 0x20, but can be"
1058 " overridden by this parm. This is an array indexed"
1059 " by interface number.");
/* Address-space selectors for an interface's registers; the string
   table is indexed by these values. */
1062 #define IPMI_IO_ADDR_SPACE 0
1063 #define IPMI_MEM_ADDR_SPACE 1
1064 static char *addr_space_to_str[] = { "I/O", "memory" };
1066 static void std_irq_cleanup(struct smi_info *info)
1068 if (info->si_type == SI_BT)
1069 /* Disable the interrupt in the BT interface. */
1070 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1071 free_irq(info->irq, info);
/* Claim info->irq; BT gets its own handler so the IRQ can be acked in
   the interface, and has interrupt generation enabled afterward.  On
   request_irq() failure the driver just runs polled.
   NOTE(review): this extract is missing lines (request_irq argument
   lists, the no-irq early return, error checks, returns). */
1074 static int std_irq_setup(struct smi_info *info)
1081 if (info->si_type == SI_BT) {
1082 rv = request_irq(info->irq,
1088 /* Enable the interrupt in the BT interface. */
1089 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1090 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1092 rv = request_irq(info->irq,
1099 "ipmi_si: %s unable to claim interrupt %d,"
1100 " running polled\n",
1101 DEVICE_NAME, info->irq);
1104 info->irq_cleanup = std_irq_cleanup;
1105 printk(" Using irq %d\n", info->irq);
1111 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1113 unsigned int addr = io->addr_data;
1115 return inb(addr + (offset * io->regspacing));
1118 static void port_outb(struct si_sm_io *io, unsigned int offset,
1121 unsigned int addr = io->addr_data;
1123 outb(b, addr + (offset * io->regspacing));
1126 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1128 unsigned int addr = io->addr_data;
1130 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1133 static void port_outw(struct si_sm_io *io, unsigned int offset,
1136 unsigned int addr = io->addr_data;
1138 outw(b << io->regshift, addr + (offset * io->regspacing));
1141 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1143 unsigned int addr = io->addr_data;
1145 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1148 static void port_outl(struct si_sm_io *io, unsigned int offset,
1151 unsigned int addr = io->addr_data;
1153 outl(b << io->regshift, addr+(offset * io->regspacing));
/* Undo port_setup(): release the claimed I/O region.  The mapsize
   formula claims from the first register to the end of the last full
   register (avoiding the trailing spacing gap), matching the claim in
   port_setup().  NOTE(review): declarations/braces and a guard around
   the release appear to be missing from this extract. */
1156 static void port_cleanup(struct smi_info *info)
1158 unsigned int addr = info->io.addr_data;
1162 mapsize = ((info->io_size * info->io.regspacing)
1163 - (info->io.regspacing - info->io.regsize));
1165 release_region (addr, mapsize);
/* Set up I/O-port access: select the inb/outb variants matching the
   configured register size, then claim the port range.
   NOTE(review): case labels, error returns and closing braces are
   missing from this extract. */
1169 static int port_setup(struct smi_info *info)
1171 unsigned int addr = info->io.addr_data;
1177 info->io_cleanup = port_cleanup;
1179 /* Figure out the actual inb/inw/inl/etc routine to use based
1180 upon the register size. */
1181 switch (info->io.regsize) {
1183 info->io.inputb = port_inb;
1184 info->io.outputb = port_outb;
1187 info->io.inputb = port_inw;
1188 info->io.outputb = port_outw;
1191 info->io.inputb = port_inl;
1192 info->io.outputb = port_outl;
1195 printk("ipmi_si: Invalid register size: %d\n",
1200 /* Calculate the total amount of memory to claim. This is an
1201 * unusual looking calculation, but it avoids claiming any
1202 * more memory than it has to. It will claim everything
1203 * between the first address to the end of the last full
1205 mapsize = ((info->io_size * info->io.regspacing)
1206 - (info->io.regspacing - info->io.regsize));
1208 if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
/*
 * Memory-mapped register accessors (MMIO counterparts of the port_*
 * routines above); same regspacing/regshift layout.
 */

/* Read one register byte via an 8-bit MMIO read (no shift needed). */
1213 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1215 return readb((io->addr)+(offset * io->regspacing));

/* Write one register byte via an 8-bit MMIO write. */
1218 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1221 writeb(b, (io->addr)+(offset * io->regspacing));

/* Read one register byte via a 16-bit MMIO read, shifting into place. */
1224 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1226 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1230 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1233 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* Read one register byte via a 32-bit MMIO read, shifting into place. */
1236 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1238 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)

/* Write one register byte via a 32-bit MMIO write. */
1242 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1245 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));

/* 64-bit MMIO variants; presumably compiled only where readq/writeq
   exist (the guarding #ifdef is outside this view) — confirm. */
1249 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1251 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)

1255 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1258 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* Undo mem_setup(): unmap the MMIO window and release the region. */
1262 static void mem_cleanup(struct smi_info *info)
1264 unsigned long addr = info->io.addr_data;
1267 if (info->io.addr) {
1268 iounmap(info->io.addr);
1270 mapsize = ((info->io_size * info->io.regspacing)
1271 - (info->io.regspacing - info->io.regsize));
1273 release_mem_region(addr, mapsize);

/*
 * Claim and ioremap the MMIO region for this interface and select
 * the MMIO accessors matching the configured register size.
 * Returns 0 on success, negative errno on failure.
 */
1277 static int mem_setup(struct smi_info *info)
1279 unsigned long addr = info->io.addr_data;
1285 info->io_cleanup = mem_cleanup;
1287 /* Figure out the actual readb/readw/readl/etc routine to use based
1288 upon the register size. */
1289 switch (info->io.regsize) {
1291 info->io.inputb = intf_mem_inb;
1292 info->io.outputb = intf_mem_outb;
1295 info->io.inputb = intf_mem_inw;
1296 info->io.outputb = intf_mem_outw;
1299 info->io.inputb = intf_mem_inl;
1300 info->io.outputb = intf_mem_outl;
1304 info->io.inputb = mem_inq;
1305 info->io.outputb = mem_outq;
1309 printk("ipmi_si: Invalid register size: %d\n",
1314 /* Calculate the total amount of memory to claim. This is an
1315 * unusual looking calculation, but it avoids claiming any
1316 * more memory than it has to. It will claim everything
1317 * between the first address to the end of the last full
1319 mapsize = ((info->io_size * info->io.regspacing)
1320 - (info->io.regspacing - info->io.regsize));
1322 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
/* If the map fails the region must be released again. */
1325 info->io.addr = ioremap(addr, mapsize);
1326 if (info->io.addr == NULL) {
1327 release_mem_region(addr, mapsize);
/*
 * Register interfaces described by the module parameters
 * (ports[], addrs[], si_type[], regspacings[], ...).  Each array
 * slot that names a port or memory address becomes one candidate
 * interface handed to try_smi_init() (call site outside this view).
 */
1334 static __devinit void hardcode_find_bmc(void)
1337 struct smi_info *info;
1339 for (i = 0; i < SI_MAX_PARMS; i++) {
/* Slot configured at all? Need a port or a memory address. */
1340 if (!ports[i] && !addrs[i])
1343 info = kzalloc(sizeof(*info), GFP_KERNEL);
1347 info->addr_source = "hardcoded";
/* Unspecified type defaults to KCS. */
1349 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1350 info->si_type = SI_KCS;
1351 } else if (strcmp(si_type[i], "smic") == 0) {
1352 info->si_type = SI_SMIC;
1353 } else if (strcmp(si_type[i], "bt") == 0) {
1354 info->si_type = SI_BT;
1357 "ipmi_si: Interface type specified "
1358 "for interface %d, was invalid: %s\n",
/* Port takes precedence over memory address when both given. */
1366 info->io_setup = port_setup;
1367 info->io.addr_data = ports[i];
1368 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1369 } else if (addrs[i]) {
1371 info->io_setup = mem_setup;
1372 info->io.addr_data = addrs[i];
1373 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1376 "ipmi_si: Interface type specified "
1377 "for interface %d, "
1378 "but port and address were not set or "
1379 "set to zero.\n", i);
1384 info->io.addr = NULL;
1385 info->io.regspacing = regspacings[i];
1386 if (!info->io.regspacing)
1387 info->io.regspacing = DEFAULT_REGSPACING;
1388 info->io.regsize = regsizes[i];
/* NOTE(review): regsize defaults to DEFAULT_REGSPACING; if a
 * DEFAULT_REGSIZE constant exists it was probably intended here
 * (both are likely 1, so behavior may be unaffected) — confirm. */
1389 if (!info->io.regsize)
1390 info->io.regsize = DEFAULT_REGSPACING;
1391 info->io.regshift = regshifts[i];
1392 info->irq = irqs[i];
1394 info->irq_setup = std_irq_setup;
1402 #include <linux/acpi.h>
1404 /* Once we get an ACPI failure, we don't try any more, because we go
1405 through the tables sequentially. Once we don't find a table, there
1407 static int acpi_failure = 0;

1409 /* For GPE-type interrupts. */
/*
 * ACPI GPE handler: bump the interrupt count and kick the SMI state
 * machine, all under si_lock so it can't race the timer/thread.
 */
1410 static u32 ipmi_acpi_gpe(void *context)
1412 struct smi_info *smi_info = context;
1413 unsigned long flags;
1418 spin_lock_irqsave(&(smi_info->si_lock), flags);
1420 spin_lock(&smi_info->count_lock);
1421 smi_info->interrupts++;
1422 spin_unlock(&smi_info->count_lock);
/* Interface is shutting down; don't touch the state machine. */
1424 if (atomic_read(&smi_info->stop_operation))
/* Timing trace — presumably compiled under a DEBUG_TIMING #ifdef
 * outside this view.  NOTE(review): t.tv_sec is a long, so "%d"
 * is a format mismatch; should be "%ld.%9.9ld" — confirm. */
1428 do_gettimeofday(&t);
1429 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1431 smi_event_handler(smi_info, 0);
1433 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1435 return ACPI_INTERRUPT_HANDLED;
/* Remove the GPE handler installed by acpi_gpe_irq_setup(). */
1438 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1443 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);

/*
 * Install ipmi_acpi_gpe() as the handler for this interface's GPE.
 * On failure the interface falls back to polled operation.
 */
1446 static int acpi_gpe_irq_setup(struct smi_info *info)
1453 /* FIXME - is level triggered right? */
1454 status = acpi_install_gpe_handler(NULL,
1456 ACPI_GPE_LEVEL_TRIGGERED,
1459 if (status != AE_OK) {
1461 "ipmi_si: %s unable to claim ACPI GPE %d,"
1462 " running polled\n",
1463 DEVICE_NAME, info->irq);
1467 info->irq_cleanup = acpi_gpe_irq_cleanup;
1468 printk(" Using ACPI GPE %d\n", info->irq);
1475 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1486 s8 CreatorRevision[4];
1489 s16 SpecificationRevision;
1492 * Bit 0 - SCI interrupt supported
1493 * Bit 1 - I/O APIC/SAPIC
1497 /* If bit 0 of InterruptType is set, then this is the SCI
1498 interrupt in the GPEx_STS register. */
1503 /* If bit 1 of InterruptType is set, then this is the I/O
1504 APIC/SAPIC interrupt. */
1505 u32 GlobalSystemInterrupt;
1507 /* The actual register address. */
1508 struct acpi_generic_address addr;
1512 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
/*
 * Build an smi_info from an ACPI SPMI table entry and hand it off
 * for registration.  Returns 0 on success, negative errno otherwise.
 */
1515 static __devinit int try_init_acpi(struct SPMITable *spmi)
1517 struct smi_info *info;
/* Only legacy value 1 is a usable SPMI description. */
1521 if (spmi->IPMIlegacy != 1) {
1522 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1526 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1527 addr_space = IPMI_MEM_ADDR_SPACE;
1529 addr_space = IPMI_IO_ADDR_SPACE;
1531 info = kzalloc(sizeof(*info), GFP_KERNEL);
1533 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1537 info->addr_source = "ACPI";
1539 /* Figure out the interface type. */
1540 switch (spmi->InterfaceType)
1543 info->si_type = SI_KCS;
1546 info->si_type = SI_SMIC;
1549 info->si_type = SI_BT;
1552 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1553 spmi->InterfaceType);
1558 if (spmi->InterruptType & 1) {
1559 /* We've got a GPE interrupt. */
1560 info->irq = spmi->GPE;
1561 info->irq_setup = acpi_gpe_irq_setup;
1562 } else if (spmi->InterruptType & 2) {
1563 /* We've got an APIC/SAPIC interrupt. */
1564 info->irq = spmi->GlobalSystemInterrupt;
1565 info->irq_setup = std_irq_setup;
1567 /* Use the default interrupt setting. */
1569 info->irq_setup = NULL;
1572 if (spmi->addr.register_bit_width) {
1573 /* A (hopefully) properly formed register bit width. */
1574 info->io.regspacing = spmi->addr.register_bit_width / 8;
1576 info->io.regspacing = DEFAULT_REGSPACING;
1578 info->io.regsize = info->io.regspacing;
1579 info->io.regshift = spmi->addr.register_bit_offset;
/* NOTE(review): the two addr_type assignments below look SWAPPED —
 * the SYSTEM_MEMORY branch sets IPMI_IO_ADDR_SPACE and the SYSTEM_IO
 * branch sets IPMI_MEM_ADDR_SPACE, the opposite of the io_setup
 * functions chosen alongside them and of the addr_space computation
 * at the top of this function.  Should be fixed to match. */
1581 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1583 info->io_setup = mem_setup;
1584 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1585 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1587 info->io_setup = port_setup;
1588 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1591 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1594 info->io.addr_data = spmi->addr.address;
/*
 * Walk all firmware SPMI table instances (1-based index for the
 * ACPI API) and try to initialize an interface from each.
 */
1601 static __devinit void acpi_find_bmc(void)
1604 struct SPMITable *spmi;
1613 for (i = 0; ; i++) {
1614 status = acpi_get_firmware_table("SPMI", i+1,
1615 ACPI_LOGICAL_ADDRESSING,
1616 (struct acpi_table_header **)
/* First missing table ends the scan. */
1618 if (status != AE_OK)
1621 try_init_acpi(spmi);
1627 struct dmi_ipmi_data
1631 unsigned long base_addr;
/*
 * Decode an SMBIOS/DMI type-38 (IPMI device) record into dmi_ipmi_data.
 * Handles both the long (new-spec) record layout and the short legacy
 * one; returns 0 on success (return statements are outside this view).
 */
1637 static int __devinit decode_dmi(struct dmi_header *dm,
1638 struct dmi_ipmi_data *dmi)
1640 u8 *data = (u8 *)dm;
1641 unsigned long base_addr;
1643 u8 len = dm->length;
1645 dmi->type = data[4];
/* Long-record path: 64-bit base address at offset 8. */
1647 memcpy(&base_addr, data+8, sizeof(unsigned long));
/* Per spec, bit 0 of the address distinguishes I/O (1) from memory. */
1649 if (base_addr & 1) {
1651 base_addr &= 0xFFFE;
1652 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1656 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1658 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1660 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1662 dmi->irq = data[0x11];
1664 /* The top two bits of byte 0x10 hold the register spacing. */
1665 reg_spacing = (data[0x10] & 0xC0) >> 6;
1666 switch(reg_spacing){
1667 case 0x00: /* Byte boundaries */
1670 case 0x01: /* 32-bit boundaries */
1673 case 0x02: /* 16-byte boundaries */
1677 /* Some other interface, just ignore it. */
/* Short legacy-record path below. */
1682 /* Note that technically, the lower bit of the base
1683 * address should be 1 if the address is I/O and 0 if
1684 * the address is in memory. So many systems get that
1685 * wrong (and all that I have seen are I/O) so we just
1686 * ignore that bit and assume I/O. Systems that use
1687 * memory should use the newer spec, anyway. */
1688 dmi->base_addr = base_addr & 0xfffe;
1689 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1693 dmi->slave_addr = data[6];
/*
 * Build an smi_info from a decoded SMBIOS record and hand it off
 * for registration.
 */
1698 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1700 struct smi_info *info;
1702 info = kzalloc(sizeof(*info), GFP_KERNEL);
1705 "ipmi_si: Could not allocate SI data\n");
1709 info->addr_source = "SMBIOS";
/* SMBIOS interface-type codes map directly onto si_type. */
1711 switch (ipmi_data->type) {
1712 case 0x01: /* KCS */
1713 info->si_type = SI_KCS;
1715 case 0x02: /* SMIC */
1716 info->si_type = SI_SMIC;
1719 info->si_type = SI_BT;
1725 switch (ipmi_data->addr_space) {
1726 case IPMI_MEM_ADDR_SPACE:
1727 info->io_setup = mem_setup;
1728 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1731 case IPMI_IO_ADDR_SPACE:
1732 info->io_setup = port_setup;
1733 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1739 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1740 ipmi_data->addr_space);
1743 info->io.addr_data = ipmi_data->base_addr;
1745 info->io.regspacing = ipmi_data->offset;
1746 if (!info->io.regspacing)
1747 info->io.regspacing = DEFAULT_REGSPACING;
1748 info->io.regsize = DEFAULT_REGSPACING;
1749 info->io.regshift = 0;
1751 info->slave_addr = ipmi_data->slave_addr;
1753 info->irq = ipmi_data->irq;
1755 info->irq_setup = std_irq_setup;
/*
 * Scan the DMI device list for IPMI entries and try to initialize
 * an interface from each successfully decoded record.
 */
1760 static void __devinit dmi_find_bmc(void)
1762 struct dmi_device *dev = NULL;
1763 struct dmi_ipmi_data data;
1766 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1767 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1769 try_init_dmi(&data);
1772 #endif /* CONFIG_DMI */
1776 #define PCI_ERMC_CLASSCODE 0x0C0700
1777 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00
1778 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
1779 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
1780 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
1781 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
1783 #define PCI_HP_VENDOR_ID 0x103C
1784 #define PCI_MMC_DEVICE_ID 0x121A
1785 #define PCI_MMC_ADDR_CW 0x10
/* Per-interface PCI teardown: disable the device enabled in probe. */
1787 static void ipmi_pci_cleanup(struct smi_info *info)
1789 struct pci_dev *pdev = info->addr_source_data;
1791 pci_disable_device(pdev);

/*
 * PCI probe: derive the interface type from the low byte of the
 * class code, enable the device, and register BAR 0 (I/O or MMIO)
 * as the interface's register window.
 */
1794 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1795 const struct pci_device_id *ent)
1798 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1799 struct smi_info *info;
1800 int first_reg_offset = 0;
1802 info = kzalloc(sizeof(*info), GFP_KERNEL);
1806 info->addr_source = "PCI";
1808 switch (class_type) {
1809 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1810 info->si_type = SI_SMIC;
1813 case PCI_ERMC_CLASSCODE_TYPE_KCS:
1814 info->si_type = SI_KCS;
1817 case PCI_ERMC_CLASSCODE_TYPE_BT:
1818 info->si_type = SI_BT;
1823 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1824 pci_name(pdev), class_type);
1828 rv = pci_enable_device(pdev);
1830 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1836 info->addr_source_cleanup = ipmi_pci_cleanup;
1837 info->addr_source_data = pdev;
/* HP hardware starts its registers one slot in.
 * NOTE(review): first_reg_offset is set but not applied to
 * addr_data in the visible lines — confirm where it is consumed. */
1839 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1840 first_reg_offset = 1;
1842 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1843 info->io_setup = port_setup;
1844 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1846 info->io_setup = mem_setup;
1847 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1849 info->io.addr_data = pci_resource_start(pdev, 0);
1851 info->io.regspacing = DEFAULT_REGSPACING;
1852 info->io.regsize = DEFAULT_REGSPACING;
1853 info->io.regshift = 0;
1855 info->irq = pdev->irq;
1857 info->irq_setup = std_irq_setup;
1859 info->dev = &pdev->dev;
1861 return try_smi_init(info);
1864 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
1869 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1874 static int ipmi_pci_resume(struct pci_dev *pdev)
/* Match the HP management controller explicitly, plus anything with
 * the generic IPMI class code. */
1880 static struct pci_device_id ipmi_pci_devices[] = {
1881 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1882 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) }
1884 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);

1886 static struct pci_driver ipmi_pci_driver = {
1887 .name = DEVICE_NAME,
1888 .id_table = ipmi_pci_devices,
1889 .probe = ipmi_pci_probe,
1890 .remove = __devexit_p(ipmi_pci_remove),
1892 .suspend = ipmi_pci_suspend,
1893 .resume = ipmi_pci_resume,
/*
 * Issue a Get Device ID command synchronously through the low-level
 * state machine (polling, no interrupts yet) to confirm a BMC is
 * present, and record the response in smi_info->device_id.
 * Returns 0 on success, negative errno on failure.
 */
1899 static int try_get_dev_id(struct smi_info *smi_info)
1901 unsigned char msg[2];
1902 unsigned char *resp;
1903 unsigned long resp_len;
1904 enum si_sm_result smi_result;
1907 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1911 /* Do a Get Device ID command, since it comes back with some
1913 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1914 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1915 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1917 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
/* Poll the state machine to completion, sleeping a jiffy when it
 * asks for a delay. */
1920 if (smi_result == SI_SM_CALL_WITH_DELAY ||
1921 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1922 schedule_timeout_uninterruptible(1);
1923 smi_result = smi_info->handlers->event(
1924 smi_info->si_sm, 100);
1926 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1928 smi_result = smi_info->handlers->event(
1929 smi_info->si_sm, 0);
1934 if (smi_result == SI_SM_HOSED) {
1935 /* We couldn't get the state machine to run, so whatever's at
1936 the port is probably not an IPMI SMI interface. */
1941 /* Otherwise, we got some data. */
1942 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1943 resp, IPMI_MAX_MSG_LENGTH);
/* A Get Device ID response is at least 14 bytes (header + 11 data). */
1944 if (resp_len < 14) {
1945 /* That's odd, it should be longer. */
/* resp[1] = command echo, resp[2] = completion code (0 = OK). */
1950 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1951 /* That's odd, it shouldn't be able to fail. */
1956 /* Record info from the get device id, in case we need it. */
1957 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
/* /proc "type" entry: report the interface flavor as a short string. */
1964 static int type_file_read_proc(char *page, char **start, off_t off,
1965 int count, int *eof, void *data)
1967 char *out = (char *) page;
1968 struct smi_info *smi = data;
1970 switch (smi->si_type) {
1972 return sprintf(out, "kcs\n");
1974 return sprintf(out, "smic\n");
1976 return sprintf(out, "bt\n");

/* /proc "si_stats" entry: dump the per-interface counters. */
1982 static int stat_file_read_proc(char *page, char **start, off_t off,
1983 int count, int *eof, void *data)
1985 char *out = (char *) page;
1986 struct smi_info *smi = data;
1988 out += sprintf(out, "interrupts_enabled:    %d\n",
1989 smi->irq && !smi->interrupt_disabled);
1990 out += sprintf(out, "short_timeouts:        %ld\n",
1991 smi->short_timeouts);
1992 out += sprintf(out, "long_timeouts:         %ld\n",
1993 smi->long_timeouts);
1994 out += sprintf(out, "timeout_restarts:      %ld\n",
1995 smi->timeout_restarts);
1996 out += sprintf(out, "idles:                 %ld\n",
1998 out += sprintf(out, "interrupts:            %ld\n",
2000 out += sprintf(out, "attentions:            %ld\n",
2002 out += sprintf(out, "flag_fetches:          %ld\n",
2004 out += sprintf(out, "hosed_count:           %ld\n",
2006 out += sprintf(out, "complete_transactions: %ld\n",
2007 smi->complete_transactions);
2008 out += sprintf(out, "events:                %ld\n",
2010 out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2011 smi->watchdog_pretimeouts);
2012 out += sprintf(out, "incoming_messages:     %ld\n",
2013 smi->incoming_messages);
2015 return (out - ((char *) page));
2019  * oem_data_avail_to_receive_msg_avail
2020  * @info - smi_info structure with msg_flags set
2022  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2023  * Returns 1 indicating need to re-run handle_flags().
/* Clears the OEM bits and substitutes RECEIVE_MSG_AVAIL so the
 * generic flag handling fetches the pending message. */
2025 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2027 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2033 * setup_dell_poweredge_oem_data_handler
2034 * @info - smi_info.device_id must be populated
2036 * Systems that match, but have firmware version < 1.40 may assert
2037 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2038 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2039 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2040 * as RECEIVE_MSG_AVAIL instead.
2042 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2043 * assert the OEM[012] bits, and if it did, the driver would have to
2044 * change to handle that properly, we don't actually check for the
2046 * Device ID = 0x20 BMC on PowerEdge 8G servers
2047 * Device Revision = 0x80
2048 * Firmware Revision1 = 0x01 BMC version 1.40
2049 * Firmware Revision2 = 0x40 BCD encoded
2050 * IPMI Version = 0x51 IPMI 1.5
2051 * Manufacturer ID = A2 02 00 Dell IANA
2053 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2054 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
2057 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2058 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2059 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2060 #define DELL_IANA_MFR_ID 0x0002a2
/*
 * Install the OEM-flag workaround on affected Dell BMCs: 8G servers
 * matching the ID/rev/version above, and any Dell BMC reporting
 * IPMI < 1.5 (see the block comment preceding these defines).
 */
2061 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2063 struct ipmi_device_id *id = &smi_info->device_id;
2064 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2065 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2066 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2067 id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2068 smi_info->oem_data_avail_handler =
2069 oem_data_avail_to_receive_msg_avail;
2071 else if (ipmi_version_major(id) < 1 ||
2072 (ipmi_version_major(id) == 1 &&
2073 ipmi_version_minor(id) < 5)) {
2074 smi_info->oem_data_avail_handler =
2075 oem_data_avail_to_receive_msg_avail;

2080 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
/*
 * Fail the current message locally with completion code 0xCA
 * ("cannot return requested number of bytes") instead of sending it
 * to the BMC; used by the Dell BT workaround below.
 */
2081 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2083 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2085 /* Make it a response */
2086 msg->rsp[0] = msg->data[0] | 4;
2087 msg->rsp[1] = msg->data[1];
2088 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2090 smi_info->curr_msg = NULL;
2091 deliver_recv_msg(smi_info, msg);
2095  * dell_poweredge_bt_xaction_handler
2096  * @info - smi_info.device_id must be populated
2098  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2099  * not respond to a Get SDR command if the length of the data
2100  * requested is exactly 0x3A, which leads to command timeouts and no
2101  * data returned.  This intercepts such commands, and causes userspace
2102  * callers to try again with a different-sized buffer, which succeeds.
2105 #define STORAGE_NETFN 0x0A
2106 #define STORAGE_CMD_GET_SDR 0x23
/* Notifier called before each transaction; rejects the problematic
 * Get SDR request locally (see block comment above). */
2107 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2108 unsigned long unused,
2111 struct smi_info *smi_info = in;
2112 unsigned char *data = smi_info->curr_msg->data;
2113 unsigned int size   = smi_info->curr_msg->data_size;
2115 (data[0]>>2) == STORAGE_NETFN &&
2116 data[1] == STORAGE_CMD_GET_SDR &&
2118 return_hosed_msg_badsize(smi_info);

2124 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2125 .notifier_call = dell_poweredge_bt_xaction_handler,

2129  * setup_dell_poweredge_bt_xaction_handler
2130  * @info - smi_info.device_id must be filled in already
2132  * Fills in smi_info.device_id.start_transaction_pre_hook
2133  * when we know what function to use there.
2136 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2138 struct ipmi_device_id *id = &smi_info->device_id;
/* Only Dell BMCs on the BT interface need the workaround. */
2139 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2140 smi_info->si_type == SI_BT)
2141 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2145  * setup_oem_data_handler
2146  * @info - smi_info.device_id must be filled in already
2148  * Fills in smi_info.device_id.oem_data_available_handler
2149  * when we know what function to use there.
2152 static void setup_oem_data_handler(struct smi_info *smi_info)
2154 setup_dell_poweredge_oem_data_handler(smi_info);

/* Dispatch point for per-vendor transaction hooks. */
2157 static void setup_xaction_handlers(struct smi_info *smi_info)
2159 setup_dell_poweredge_bt_xaction_handler(smi_info);

/* Stop the kernel polling thread (if one was started) and cancel the
 * SI timer; used on teardown and on init-error unwinding. */
2162 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2164 if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM))
2165 kthread_stop(smi_info->thread);
2166 del_timer_sync(&smi_info->si_timer);
/* Well-known legacy port assignments tried when nothing else found
 * a BMC; the list is terminated by a zero port. */
2169 static struct ipmi_default_vals
2173 } __devinit ipmi_defaults[] =
2175 { .type = SI_KCS, .port = 0xca2 },
2176 { .type = SI_SMIC, .port = 0xca9 },
2177 { .type = SI_BT, .port = 0xe4 },

/*
 * Probe the default legacy ports above, stopping at the first one
 * where an interface initializes (break is outside this view —
 * presumably after the success printk; confirm).
 */
2181 static __devinit void default_find_bmc(void)
2183 struct smi_info *info;
2186 for (i = 0; ; i++) {
2187 if (!ipmi_defaults[i].port)
2190 info = kzalloc(sizeof(*info), GFP_KERNEL);
2194 info->addr_source = NULL;
2196 info->si_type = ipmi_defaults[i].type;
2197 info->io_setup = port_setup;
2198 info->io.addr_data = ipmi_defaults[i].port;
2199 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2201 info->io.addr = NULL;
2202 info->io.regspacing = DEFAULT_REGSPACING;
2203 info->io.regsize = DEFAULT_REGSPACING;
2204 info->io.regshift = 0;
2206 if (try_smi_init(info) == 0) {
2208 printk(KERN_INFO "ipmi_si: Found default %s state"
2209 " machine at %s address 0x%lx\n",
2210 si_to_str[info->si_type],
2211 addr_space_to_str[info->io.addr_type],
2212 info->io.addr_data);

/* True if no already-registered interface uses the same address in
 * the same address space; caller must hold smi_infos_lock. */
2218 static int is_new_interface(struct smi_info *info)
2222 list_for_each_entry(e, &smi_infos, link) {
2223 if (e->io.addr_type != info->io.addr_type)
2225 if (e->io.addr_data == info->io.addr_data)
/*
 * Bring up one candidate interface end-to-end: pick the state-machine
 * handlers, claim I/O, detect the hardware, verify a BMC with Get
 * Device ID, start the timer/thread, register with the IPMI core and
 * create the proc entries.  Takes ownership of new_smi.  Returns 0 on
 * success; on error unwinds everything done so far (the error labels
 * at the bottom run in reverse order of setup).
 */
2232 static int try_smi_init(struct smi_info *new_smi)
2236 if (new_smi->addr_source) {
2237 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2238 " machine at %s address 0x%lx, slave address 0x%x,"
2240 new_smi->addr_source,
2241 si_to_str[new_smi->si_type],
2242 addr_space_to_str[new_smi->io.addr_type],
2243 new_smi->io.addr_data,
2244 new_smi->slave_addr, new_smi->irq);
/* smi_infos_lock guards the interface list for the duplicate check
 * and the final list_add_tail. */
2247 down(&smi_infos_lock);
2248 if (!is_new_interface(new_smi)) {
2249 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2254 /* So we know not to free it unless we have allocated one. */
2255 new_smi->intf = NULL;
2256 new_smi->si_sm = NULL;
2257 new_smi->handlers = NULL;
2259 switch (new_smi->si_type) {
2261 new_smi->handlers = &kcs_smi_handlers;
2265 new_smi->handlers = &smic_smi_handlers;
2269 new_smi->handlers = &bt_smi_handlers;
2273 /* No support for anything else yet. */
2278 /* Allocate the state machine's data and initialize it. */
2279 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2280 if (!new_smi->si_sm) {
2281 printk(" Could not allocate state machine memory\n");
2285 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2288 /* Now that we know the I/O size, we can set up the I/O. */
2289 rv = new_smi->io_setup(new_smi);
2291 printk(" Could not set up I/O space\n");
2295 spin_lock_init(&(new_smi->si_lock));
2296 spin_lock_init(&(new_smi->msg_lock));
2297 spin_lock_init(&(new_smi->count_lock));
2299 /* Do low-level detection first. */
2300 if (new_smi->handlers->detect(new_smi->si_sm)) {
2301 if (new_smi->addr_source)
2302 printk(KERN_INFO "ipmi_si: Interface detection"
2308 /* Attempt a get device id command. If it fails, we probably
2309 don't have a BMC here. */
2310 rv = try_get_dev_id(new_smi);
2312 if (new_smi->addr_source)
2313 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2314 " at this location\n");
/* Vendor quirks depend on device_id, so they come after it. */
2318 setup_oem_data_handler(new_smi);
2319 setup_xaction_handlers(new_smi);
2321 /* Try to claim any interrupts. */
2322 if (new_smi->irq_setup)
2323 new_smi->irq_setup(new_smi);
2325 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2326 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2327 new_smi->curr_msg = NULL;
2328 atomic_set(&new_smi->req_events, 0);
2329 new_smi->run_to_completion = 0;
2331 new_smi->interrupt_disabled = 0;
2332 atomic_set(&new_smi->stop_operation, 0);
2333 new_smi->intf_num = smi_num;
2336 /* Start clearing the flags before we enable interrupts or the
2337 timer to avoid racing with the timer. */
2338 start_clear_flags(new_smi);
2339 /* IRQ is defined to be set when non-zero. */
2341 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2343 /* The ipmi_register_smi() code does some operations to
2344 determine the channel information, so we must be ready to
2345 handle operations before it is called. This means we have
2346 to stop the timer if we get an error after this point. */
2347 init_timer(&(new_smi->si_timer));
2348 new_smi->si_timer.data = (long) new_smi;
2349 new_smi->si_timer.function = smi_timeout;
2350 new_smi->last_timeout_jiffies = jiffies;
2351 new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2353 add_timer(&(new_smi->si_timer));
/* BT is interrupt/timer driven; the others get a polling kthread. */
2354 if (new_smi->si_type != SI_BT)
2355 new_smi->thread = kthread_run(ipmi_thread, new_smi,
2356 "kipmi%d", new_smi->intf_num);
2358 if (!new_smi->dev) {
2359 /* If we don't already have a device from something
2360 * else (like PCI), then register a new one. */
2361 new_smi->pdev = platform_device_alloc("ipmi_si",
2366 " Unable to allocate platform device\n");
2367 goto out_err_stop_timer;
2369 new_smi->dev = &new_smi->pdev->dev;
2370 new_smi->dev->driver = &ipmi_driver;
2372 rv = platform_device_register(new_smi->pdev);
2376 " Unable to register system interface device:"
2379 goto out_err_stop_timer;
2381 new_smi->dev_registered = 1;
2384 rv = ipmi_register_smi(&handlers,
2386 &new_smi->device_id,
2388 new_smi->slave_addr,
2392 "ipmi_si: Unable to register device: error %d\n",
2394 goto out_err_stop_timer;
2397 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2398 type_file_read_proc, NULL,
2399 new_smi, THIS_MODULE);
2402 "ipmi_si: Unable to create proc entry: %d\n",
2404 goto out_err_stop_timer;
2407 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2408 stat_file_read_proc, NULL,
2409 new_smi, THIS_MODULE);
2412 "ipmi_si: Unable to create proc entry: %d\n",
2414 goto out_err_stop_timer;
2417 list_add_tail(&new_smi->link, &smi_infos);
2419 up(&smi_infos_lock);
2421 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
/* Error unwinding: stop timer/thread first, then unregister and
 * free in reverse order of acquisition. */
2426 atomic_inc(&new_smi->stop_operation);
2427 wait_for_timer_and_thread(new_smi);
2431 ipmi_unregister_smi(new_smi->intf);
2433 if (new_smi->irq_cleanup)
2434 new_smi->irq_cleanup(new_smi);
2436 /* Wait until we know that we are out of any interrupt
2437 handlers might have been running before we freed the
2439 synchronize_sched();
2441 if (new_smi->si_sm) {
2442 if (new_smi->handlers)
2443 new_smi->handlers->cleanup(new_smi->si_sm);
2444 kfree(new_smi->si_sm);
2446 if (new_smi->addr_source_cleanup)
2447 new_smi->addr_source_cleanup(new_smi);
2448 if (new_smi->io_cleanup)
2449 new_smi->io_cleanup(new_smi);
2451 if (new_smi->dev_registered)
2452 platform_device_unregister(new_smi->pdev);
2456 up(&smi_infos_lock);
/*
 * Module init: register the driver core objects, then run every
 * discovery source (module parameters, ACPI, DMI, PCI), falling back
 * to the default legacy ports only if nothing was found and
 * si_trydefaults is set.  Fails with -ENODEV if no interface at all
 * could be brought up.
 */
2461 static __devinit int init_ipmi_si(void)
2471 /* Register the device drivers. */
2472 rv = driver_register(&ipmi_driver);
2475 "init_ipmi_si: Unable to register driver: %d\n",
2481 /* Parse out the si_type string into its components. */
2484 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2486 str = strchr(str, ',');
2496 printk(KERN_INFO "IPMI System Interface driver.\n");
2498 hardcode_find_bmc();
2510 pci_module_init(&ipmi_pci_driver);
2513 if (si_trydefaults) {
2514 down(&smi_infos_lock);
2515 if (list_empty(&smi_infos)) {
2516 /* No BMC was found, try defaults. */
/* Drop the lock before probing; default_find_bmc() calls
 * try_smi_init(), which takes smi_infos_lock itself. */
2517 up(&smi_infos_lock);
2520 up(&smi_infos_lock);
2524 down(&smi_infos_lock);
2525 if (list_empty(&smi_infos)) {
2526 up(&smi_infos_lock);
2528 pci_unregister_driver(&ipmi_pci_driver);
2530 printk("ipmi_si: Unable to find any System Interface(s)\n");
2533 up(&smi_infos_lock);
2537 module_init(init_ipmi_si);
/*
 * Tear down one interface: unlink it, stop interrupts/timer/thread,
 * drain any in-flight transaction, then unregister and free all
 * resources in reverse order of setup.  Caller must hold
 * smi_infos_lock (list_del below is unlocked here).
 */
2539 static void __devexit cleanup_one_si(struct smi_info *to_clean)
2542 unsigned long flags;
2547 list_del(&to_clean->link);
2549 /* Tell the timer and interrupt handlers that we are shutting
2551 spin_lock_irqsave(&(to_clean->si_lock), flags);
2552 spin_lock(&(to_clean->msg_lock));
2554 atomic_inc(&to_clean->stop_operation);
2556 if (to_clean->irq_cleanup)
2557 to_clean->irq_cleanup(to_clean);
2559 spin_unlock(&(to_clean->msg_lock));
2560 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2562 /* Wait until we know that we are out of any interrupt
2563 handlers might have been running before we freed the
2565 synchronize_sched();
2567 wait_for_timer_and_thread(to_clean);
2569 /* Interrupts and timeouts are stopped, now make sure the
2570 interface is in a clean state. */
2571 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
/* Busy-poll the state machine until idle, sleeping one jiffy
 * per iteration. */
2573 schedule_timeout_uninterruptible(1);
2576 rv = ipmi_unregister_smi(to_clean->intf);
2579 "ipmi_si: Unable to unregister device: errno=%d\n",
2583 to_clean->handlers->cleanup(to_clean->si_sm);
2585 kfree(to_clean->si_sm);
2587 if (to_clean->addr_source_cleanup)
2588 to_clean->addr_source_cleanup(to_clean);
2589 if (to_clean->io_cleanup)
2590 to_clean->io_cleanup(to_clean);
2592 if (to_clean->dev_registered)
2593 platform_device_unregister(to_clean->pdev);
/*
 * Module exit: unregister the PCI driver, tear down every remaining
 * interface under smi_infos_lock, then drop the driver registration.
 */
2598 static __exit void cleanup_ipmi_si(void)
2600 struct smi_info *e, *tmp_e;
2606 pci_unregister_driver(&ipmi_pci_driver);
2609 down(&smi_infos_lock);
/* _safe variant: cleanup_one_si() (call outside this view) deletes
 * each entry from the list while we walk it. */
2610 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2612 up(&smi_infos_lock);
2614 driver_unregister(&ipmi_driver);
2614 driver_unregister(&ipmi_driver);
2616 module_exit(cleanup_ipmi_si);
2618 MODULE_LICENSE("GPL");
2619 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2620 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");