4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.0"

/* Forward declarations for helpers defined later in this file. */
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

/* Non-zero once ipmi_init_msghandler() has successfully run. */
static int initialized = 0;

#ifdef CONFIG_PROC_FS
/* Root of the /proc/ipmi directory tree. */
static struct proc_dir_entry *proc_ipmi_root = NULL;
#endif /* CONFIG_PROC_FS */

/* The maximum number of events that will be queued before new events
   start being dropped. */
#define MAX_EVENTS_IN_QUEUE 25

/* Don't let a message sit in a queue forever, always time it with at least
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT 60000
70 * The main "user" data structure.
74 struct list_head link;
76 /* Set to "0" when the user is destroyed. */
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
85 /* The interface this user is bound to. */
88 /* Does this interface receive IPMI events? */
94 struct list_head link;
102 * This is used to form a linked lised during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
/* One outstanding-message slot in an interface's sequence table. */
struct seq_table
{
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int  retries_left;

	/* To verify on an incoming send message response that this is
	   the message that the response is for, we keep a sequence id
	   and increment it every time we send a message. */
	long seqid;

	/* This is held so we can properly respond to the message on a
	   timeout, and it is used to hold the temporary data for
	   retransmission, too. */
	struct ipmi_recv_msg *recv_msg;
};
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The layout is a 6-bit
   sequence number in bits 31..26 and a 26-bit sequence id below it;
   the masks must agree between STORE, GET and NEXT_SEQID. */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
/* Per-channel state: the medium/protocol types plus our address on it. */
struct ipmi_channel
{
	unsigned char medium;
	unsigned char protocol;

	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	   but may be changed by the user. */
	unsigned char address;

	/* My LUN.  This should generally stay the SMS LUN, but just in
	   case... */
	unsigned char lun;
};
#ifdef CONFIG_PROC_FS
/* One node in the per-interface singly-linked list of /proc entries. */
struct ipmi_proc_entry
{
	char                   *name;
	struct ipmi_proc_entry *next;
};
#endif
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
172 struct kref refcount;
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
/* Number of outstanding-message slots in an interface's sequence
   table, and the maximum number of channels per interface. */
#define IPMI_IPMB_NUM_SEQ	64
#define IPMI_MAX_CHANNELS	16
191 /* What interface number are we? */
194 struct kref refcount;
196 /* Used for a list of interfaces. */
197 struct list_head link;
199 /* The list of upper layers that are using me. seq_lock
201 struct list_head users;
203 /* Information to supply to users. */
204 unsigned char ipmi_version_major;
205 unsigned char ipmi_version_minor;
207 /* Used for wake ups at startup. */
208 wait_queue_head_t waitq;
210 struct bmc_device *bmc;
214 /* This is the lower-layer's sender routine. Note that you
215 * must either be holding the ipmi_interfaces_mutex or be in
216 * an umpreemptible region to use this. You must fetch the
217 * value into a local variable and make sure it is not NULL. */
218 struct ipmi_smi_handlers *handlers;
221 #ifdef CONFIG_PROC_FS
222 /* A list of proc entries for this interface. This does not
223 need a lock, only one thread creates it and only one thread
225 spinlock_t proc_entry_lock;
226 struct ipmi_proc_entry *proc_entries;
229 /* Driver-model device for the system interface. */
230 struct device *si_dev;
232 /* A table of sequence numbers for this interface. We use the
233 sequence numbers for IPMB messages that go out of the
234 interface to match them up with their responses. A routine
235 is called periodically to time the items in this list. */
237 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
240 /* Messages that were delayed for some reason (out of memory,
241 for instance), will go in here to be processed later in a
242 periodic timer interrupt. */
243 spinlock_t waiting_msgs_lock;
244 struct list_head waiting_msgs;
246 /* The list of command receivers that are registered for commands
247 on this interface. */
248 struct mutex cmd_rcvrs_mutex;
249 struct list_head cmd_rcvrs;
251 /* Events that were queues because no one was there to receive
253 spinlock_t events_lock; /* For dealing with event stuff. */
254 struct list_head waiting_events;
255 unsigned int waiting_events_count; /* How many events in queue? */
256 int delivering_events;
258 /* The event receiver for my BMC, only really used at panic
259 shutdown as a place to store this. */
260 unsigned char event_receiver;
261 unsigned char event_receiver_lun;
262 unsigned char local_sel_device;
263 unsigned char local_event_generator;
265 /* A cheap hack, if this is non-null and a message to an
266 interface comes in with a NULL user, call this routine with
267 it. Note that the message will still be freed by the
268 caller. This only works on the system interface. */
269 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
271 /* When we are scanning the channels for an SMI, this will
272 tell which channel we are scanning. */
275 /* Channel information */
276 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
279 struct proc_dir_entry *proc_dir;
280 char proc_dir_name[10];
282 spinlock_t counter_lock; /* For making counters atomic. */
284 /* Commands we got that were invalid. */
285 unsigned int sent_invalid_commands;
287 /* Commands we sent to the MC. */
288 unsigned int sent_local_commands;
289 /* Responses from the MC that were delivered to a user. */
290 unsigned int handled_local_responses;
291 /* Responses from the MC that were not delivered to a user. */
292 unsigned int unhandled_local_responses;
294 /* Commands we sent out to the IPMB bus. */
295 unsigned int sent_ipmb_commands;
296 /* Commands sent on the IPMB that had errors on the SEND CMD */
297 unsigned int sent_ipmb_command_errs;
298 /* Each retransmit increments this count. */
299 unsigned int retransmitted_ipmb_commands;
300 /* When a message times out (runs out of retransmits) this is
302 unsigned int timed_out_ipmb_commands;
304 /* This is like above, but for broadcasts. Broadcasts are
305 *not* included in the above count (they are expected to
307 unsigned int timed_out_ipmb_broadcasts;
309 /* Responses I have sent to the IPMB bus. */
310 unsigned int sent_ipmb_responses;
312 /* The response was delivered to the user. */
313 unsigned int handled_ipmb_responses;
314 /* The response had invalid data in it. */
315 unsigned int invalid_ipmb_responses;
316 /* The response didn't have anyone waiting for it. */
317 unsigned int unhandled_ipmb_responses;
319 /* Commands we sent out to the IPMB bus. */
320 unsigned int sent_lan_commands;
321 /* Commands sent on the IPMB that had errors on the SEND CMD */
322 unsigned int sent_lan_command_errs;
323 /* Each retransmit increments this count. */
324 unsigned int retransmitted_lan_commands;
325 /* When a message times out (runs out of retransmits) this is
327 unsigned int timed_out_lan_commands;
329 /* Responses I have sent to the IPMB bus. */
330 unsigned int sent_lan_responses;
332 /* The response was delivered to the user. */
333 unsigned int handled_lan_responses;
334 /* The response had invalid data in it. */
335 unsigned int invalid_lan_responses;
336 /* The response didn't have anyone waiting for it. */
337 unsigned int unhandled_lan_responses;
339 /* The command was delivered to the user. */
340 unsigned int handled_commands;
341 /* The command had invalid data in it. */
342 unsigned int invalid_commands;
343 /* The command didn't have anyone waiting for it. */
344 unsigned int unhandled_commands;
346 /* Invalid data in an event. */
347 unsigned int invalid_events;
348 /* Events that were received with the proper format. */
351 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
354 * The driver model view of the IPMI messaging driver.
356 static struct device_driver ipmidriver = {
358 .bus = &platform_bus_type
360 static DEFINE_MUTEX(ipmidriver_mutex);
362 static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
363 static DEFINE_MUTEX(ipmi_interfaces_mutex);
365 /* List of watchers that want to know when smi's are added and
367 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
368 static DEFINE_MUTEX(smi_watchers_mutex);
371 static void free_recv_msg_list(struct list_head *q)
373 struct ipmi_recv_msg *msg, *msg2;
375 list_for_each_entry_safe(msg, msg2, q, link) {
376 list_del(&msg->link);
377 ipmi_free_recv_msg(msg);
381 static void free_smi_msg_list(struct list_head *q)
383 struct ipmi_smi_msg *msg, *msg2;
385 list_for_each_entry_safe(msg, msg2, q, link) {
386 list_del(&msg->link);
387 ipmi_free_smi_msg(msg);
391 static void clean_up_interface_data(ipmi_smi_t intf)
394 struct cmd_rcvr *rcvr, *rcvr2;
395 struct list_head list;
397 free_smi_msg_list(&intf->waiting_msgs);
398 free_recv_msg_list(&intf->waiting_events);
400 /* Wholesale remove all the entries from the list in the
401 * interface and wait for RCU to know that none are in use. */
402 mutex_lock(&intf->cmd_rcvrs_mutex);
403 list_add_rcu(&list, &intf->cmd_rcvrs);
404 list_del_rcu(&intf->cmd_rcvrs);
405 mutex_unlock(&intf->cmd_rcvrs_mutex);
408 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
411 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
412 if ((intf->seq_table[i].inuse)
413 && (intf->seq_table[i].recv_msg))
415 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
420 static void intf_free(struct kref *ref)
422 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
424 clean_up_interface_data(intf);
428 struct watcher_entry {
431 struct list_head link;
434 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
437 struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
438 struct watcher_entry *e, *e2;
440 mutex_lock(&smi_watchers_mutex);
442 mutex_lock(&ipmi_interfaces_mutex);
444 /* Build a list of things to deliver. */
445 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
446 if (intf->intf_num == -1)
448 e = kmalloc(sizeof(*e), GFP_KERNEL);
451 kref_get(&intf->refcount);
453 e->intf_num = intf->intf_num;
454 list_add_tail(&e->link, &to_deliver);
457 /* We will succeed, so add it to the list. */
458 list_add(&watcher->link, &smi_watchers);
460 mutex_unlock(&ipmi_interfaces_mutex);
462 list_for_each_entry_safe(e, e2, &to_deliver, link) {
464 watcher->new_smi(e->intf_num, e->intf->si_dev);
465 kref_put(&e->intf->refcount, intf_free);
469 mutex_unlock(&smi_watchers_mutex);
474 mutex_unlock(&ipmi_interfaces_mutex);
475 mutex_unlock(&smi_watchers_mutex);
476 list_for_each_entry_safe(e, e2, &to_deliver, link) {
478 kref_put(&e->intf->refcount, intf_free);
484 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
486 mutex_lock(&smi_watchers_mutex);
487 list_del(&(watcher->link));
488 mutex_unlock(&smi_watchers_mutex);
493 * Must be called with smi_watchers_mutex held.
496 call_smi_watchers(int i, struct device *dev)
498 struct ipmi_smi_watcher *w;
500 list_for_each_entry(w, &smi_watchers, link) {
501 if (try_module_get(w->owner)) {
503 module_put(w->owner);
509 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
511 if (addr1->addr_type != addr2->addr_type)
514 if (addr1->channel != addr2->channel)
517 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
518 struct ipmi_system_interface_addr *smi_addr1
519 = (struct ipmi_system_interface_addr *) addr1;
520 struct ipmi_system_interface_addr *smi_addr2
521 = (struct ipmi_system_interface_addr *) addr2;
522 return (smi_addr1->lun == smi_addr2->lun);
525 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
526 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
528 struct ipmi_ipmb_addr *ipmb_addr1
529 = (struct ipmi_ipmb_addr *) addr1;
530 struct ipmi_ipmb_addr *ipmb_addr2
531 = (struct ipmi_ipmb_addr *) addr2;
533 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
534 && (ipmb_addr1->lun == ipmb_addr2->lun));
537 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
538 struct ipmi_lan_addr *lan_addr1
539 = (struct ipmi_lan_addr *) addr1;
540 struct ipmi_lan_addr *lan_addr2
541 = (struct ipmi_lan_addr *) addr2;
543 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
544 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
545 && (lan_addr1->session_handle
546 == lan_addr2->session_handle)
547 && (lan_addr1->lun == lan_addr2->lun));
553 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
555 if (len < sizeof(struct ipmi_system_interface_addr)) {
559 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
560 if (addr->channel != IPMI_BMC_CHANNEL)
565 if ((addr->channel == IPMI_BMC_CHANNEL)
566 || (addr->channel >= IPMI_MAX_CHANNELS)
567 || (addr->channel < 0))
570 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
571 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
573 if (len < sizeof(struct ipmi_ipmb_addr)) {
579 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
580 if (len < sizeof(struct ipmi_lan_addr)) {
589 unsigned int ipmi_addr_length(int addr_type)
591 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
592 return sizeof(struct ipmi_system_interface_addr);
594 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
595 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
597 return sizeof(struct ipmi_ipmb_addr);
600 if (addr_type == IPMI_LAN_ADDR_TYPE)
601 return sizeof(struct ipmi_lan_addr);
606 static void deliver_response(struct ipmi_recv_msg *msg)
609 ipmi_smi_t intf = msg->user_msg_data;
612 /* Special handling for NULL users. */
613 if (intf->null_user_handler) {
614 intf->null_user_handler(intf, msg);
615 spin_lock_irqsave(&intf->counter_lock, flags);
616 intf->handled_local_responses++;
617 spin_unlock_irqrestore(&intf->counter_lock, flags);
619 /* No handler, so give up. */
620 spin_lock_irqsave(&intf->counter_lock, flags);
621 intf->unhandled_local_responses++;
622 spin_unlock_irqrestore(&intf->counter_lock, flags);
624 ipmi_free_recv_msg(msg);
626 ipmi_user_t user = msg->user;
627 user->handler->ipmi_recv_hndl(msg, user->handler_data);
632 deliver_err_response(struct ipmi_recv_msg *msg, int err)
634 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
635 msg->msg_data[0] = err;
636 msg->msg.netfn |= 1; /* Convert to a response. */
637 msg->msg.data_len = 1;
638 msg->msg.data = msg->msg_data;
639 deliver_response(msg);
642 /* Find the next sequence number not being used and add the given
643 message with the given timeout to the sequence table. This must be
644 called with the interface's seq_lock held. */
645 static int intf_next_seq(ipmi_smi_t intf,
646 struct ipmi_recv_msg *recv_msg,
647 unsigned long timeout,
656 for (i = intf->curr_seq;
657 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
658 i = (i+1)%IPMI_IPMB_NUM_SEQ)
660 if (!intf->seq_table[i].inuse)
664 if (!intf->seq_table[i].inuse) {
665 intf->seq_table[i].recv_msg = recv_msg;
667 /* Start with the maximum timeout, when the send response
668 comes in we will start the real timer. */
669 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
670 intf->seq_table[i].orig_timeout = timeout;
671 intf->seq_table[i].retries_left = retries;
672 intf->seq_table[i].broadcast = broadcast;
673 intf->seq_table[i].inuse = 1;
674 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
676 *seqid = intf->seq_table[i].seqid;
677 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
685 /* Return the receive message for the given sequence number and
686 release the sequence number so it can be reused. Some other data
687 is passed in to be sure the message matches up correctly (to help
688 guard against message coming in after their timeout and the
689 sequence number being reused). */
690 static int intf_find_seq(ipmi_smi_t intf,
695 struct ipmi_addr *addr,
696 struct ipmi_recv_msg **recv_msg)
701 if (seq >= IPMI_IPMB_NUM_SEQ)
704 spin_lock_irqsave(&(intf->seq_lock), flags);
705 if (intf->seq_table[seq].inuse) {
706 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
708 if ((msg->addr.channel == channel)
709 && (msg->msg.cmd == cmd)
710 && (msg->msg.netfn == netfn)
711 && (ipmi_addr_equal(addr, &(msg->addr))))
714 intf->seq_table[seq].inuse = 0;
718 spin_unlock_irqrestore(&(intf->seq_lock), flags);
724 /* Start the timer for a specific sequence table entry. */
725 static int intf_start_seq_timer(ipmi_smi_t intf,
734 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
736 spin_lock_irqsave(&(intf->seq_lock), flags);
737 /* We do this verification because the user can be deleted
738 while a message is outstanding. */
739 if ((intf->seq_table[seq].inuse)
740 && (intf->seq_table[seq].seqid == seqid))
742 struct seq_table *ent = &(intf->seq_table[seq]);
743 ent->timeout = ent->orig_timeout;
746 spin_unlock_irqrestore(&(intf->seq_lock), flags);
751 /* Got an error for the send message for a specific sequence number. */
752 static int intf_err_seq(ipmi_smi_t intf,
760 struct ipmi_recv_msg *msg = NULL;
763 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
765 spin_lock_irqsave(&(intf->seq_lock), flags);
766 /* We do this verification because the user can be deleted
767 while a message is outstanding. */
768 if ((intf->seq_table[seq].inuse)
769 && (intf->seq_table[seq].seqid == seqid))
771 struct seq_table *ent = &(intf->seq_table[seq]);
777 spin_unlock_irqrestore(&(intf->seq_lock), flags);
780 deliver_err_response(msg, err);
786 int ipmi_create_user(unsigned int if_num,
787 struct ipmi_user_hndl *handler,
792 ipmi_user_t new_user;
796 /* There is no module usecount here, because it's not
797 required. Since this can only be used by and called from
798 other modules, they will implicitly use this module, and
799 thus this can't be removed unless the other modules are
805 /* Make sure the driver is actually initialized, this handles
806 problems with initialization order. */
808 rv = ipmi_init_msghandler();
812 /* The init code doesn't return an error if it was turned
813 off, but it won't initialize. Check that. */
818 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
822 mutex_lock(&ipmi_interfaces_mutex);
823 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
824 if (intf->intf_num == if_num)
827 /* Not found, return an error */
832 /* Note that each existing user holds a refcount to the interface. */
833 kref_get(&intf->refcount);
835 kref_init(&new_user->refcount);
836 new_user->handler = handler;
837 new_user->handler_data = handler_data;
838 new_user->intf = intf;
839 new_user->gets_events = 0;
841 if (!try_module_get(intf->handlers->owner)) {
846 if (intf->handlers->inc_usecount) {
847 rv = intf->handlers->inc_usecount(intf->send_info);
849 module_put(intf->handlers->owner);
854 /* Hold the lock so intf->handlers is guaranteed to be good
856 mutex_unlock(&ipmi_interfaces_mutex);
859 spin_lock_irqsave(&intf->seq_lock, flags);
860 list_add_rcu(&new_user->link, &intf->users);
861 spin_unlock_irqrestore(&intf->seq_lock, flags);
866 kref_put(&intf->refcount, intf_free);
868 mutex_unlock(&ipmi_interfaces_mutex);
873 static void free_user(struct kref *ref)
875 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
879 int ipmi_destroy_user(ipmi_user_t user)
881 ipmi_smi_t intf = user->intf;
884 struct cmd_rcvr *rcvr;
885 struct cmd_rcvr *rcvrs = NULL;
889 /* Remove the user from the interface's sequence table. */
890 spin_lock_irqsave(&intf->seq_lock, flags);
891 list_del_rcu(&user->link);
893 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
894 if (intf->seq_table[i].inuse
895 && (intf->seq_table[i].recv_msg->user == user))
897 intf->seq_table[i].inuse = 0;
898 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
901 spin_unlock_irqrestore(&intf->seq_lock, flags);
904 * Remove the user from the command receiver's table. First
905 * we build a list of everything (not using the standard link,
906 * since other things may be using it till we do
907 * synchronize_rcu()) then free everything in that list.
909 mutex_lock(&intf->cmd_rcvrs_mutex);
910 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
911 if (rcvr->user == user) {
912 list_del_rcu(&rcvr->link);
917 mutex_unlock(&intf->cmd_rcvrs_mutex);
925 mutex_lock(&ipmi_interfaces_mutex);
926 if (intf->handlers) {
927 module_put(intf->handlers->owner);
928 if (intf->handlers->dec_usecount)
929 intf->handlers->dec_usecount(intf->send_info);
931 mutex_unlock(&ipmi_interfaces_mutex);
933 kref_put(&intf->refcount, intf_free);
935 kref_put(&user->refcount, free_user);
940 void ipmi_get_version(ipmi_user_t user,
941 unsigned char *major,
942 unsigned char *minor)
944 *major = user->intf->ipmi_version_major;
945 *minor = user->intf->ipmi_version_minor;
948 int ipmi_set_my_address(ipmi_user_t user,
949 unsigned int channel,
950 unsigned char address)
952 if (channel >= IPMI_MAX_CHANNELS)
954 user->intf->channels[channel].address = address;
958 int ipmi_get_my_address(ipmi_user_t user,
959 unsigned int channel,
960 unsigned char *address)
962 if (channel >= IPMI_MAX_CHANNELS)
964 *address = user->intf->channels[channel].address;
968 int ipmi_set_my_LUN(ipmi_user_t user,
969 unsigned int channel,
972 if (channel >= IPMI_MAX_CHANNELS)
974 user->intf->channels[channel].lun = LUN & 0x3;
978 int ipmi_get_my_LUN(ipmi_user_t user,
979 unsigned int channel,
980 unsigned char *address)
982 if (channel >= IPMI_MAX_CHANNELS)
984 *address = user->intf->channels[channel].lun;
988 int ipmi_set_gets_events(ipmi_user_t user, int val)
991 ipmi_smi_t intf = user->intf;
992 struct ipmi_recv_msg *msg, *msg2;
993 struct list_head msgs;
995 INIT_LIST_HEAD(&msgs);
997 spin_lock_irqsave(&intf->events_lock, flags);
998 user->gets_events = val;
1000 if (intf->delivering_events)
1002 * Another thread is delivering events for this, so
1003 * let it handle any new events.
1007 /* Deliver any queued events. */
1008 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1009 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1010 list_move_tail(&msg->link, &msgs);
1011 intf->waiting_events_count = 0;
1013 intf->delivering_events = 1;
1014 spin_unlock_irqrestore(&intf->events_lock, flags);
1016 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1018 kref_get(&user->refcount);
1019 deliver_response(msg);
1022 spin_lock_irqsave(&intf->events_lock, flags);
1023 intf->delivering_events = 0;
1027 spin_unlock_irqrestore(&intf->events_lock, flags);
1032 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1033 unsigned char netfn,
1037 struct cmd_rcvr *rcvr;
1039 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1040 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1041 && (rcvr->chans & (1 << chan)))
1047 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1048 unsigned char netfn,
1052 struct cmd_rcvr *rcvr;
1054 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1055 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1056 && (rcvr->chans & chans))
1062 int ipmi_register_for_cmd(ipmi_user_t user,
1063 unsigned char netfn,
1067 ipmi_smi_t intf = user->intf;
1068 struct cmd_rcvr *rcvr;
1072 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1076 rcvr->netfn = netfn;
1077 rcvr->chans = chans;
1080 mutex_lock(&intf->cmd_rcvrs_mutex);
1081 /* Make sure the command/netfn is not already registered. */
1082 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1087 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1090 mutex_unlock(&intf->cmd_rcvrs_mutex);
1097 int ipmi_unregister_for_cmd(ipmi_user_t user,
1098 unsigned char netfn,
1102 ipmi_smi_t intf = user->intf;
1103 struct cmd_rcvr *rcvr;
1104 struct cmd_rcvr *rcvrs = NULL;
1105 int i, rv = -ENOENT;
1107 mutex_lock(&intf->cmd_rcvrs_mutex);
1108 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1109 if (((1 << i) & chans) == 0)
1111 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1114 if (rcvr->user == user) {
1116 rcvr->chans &= ~chans;
1117 if (rcvr->chans == 0) {
1118 list_del_rcu(&rcvr->link);
1124 mutex_unlock(&intf->cmd_rcvrs_mutex);
1134 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1136 ipmi_smi_t intf = user->intf;
1138 intf->handlers->set_run_to_completion(intf->send_info, val);
/* Compute the IPMB checksum of a buffer: the two's complement of the
   8-bit sum of the bytes, so that summing the data plus the checksum
   yields zero (mod 256). */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1152 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1153 struct kernel_ipmi_msg *msg,
1154 struct ipmi_ipmb_addr *ipmb_addr,
1156 unsigned char ipmb_seq,
1158 unsigned char source_address,
1159 unsigned char source_lun)
1163 /* Format the IPMB header data. */
1164 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1165 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1166 smi_msg->data[2] = ipmb_addr->channel;
1168 smi_msg->data[3] = 0;
1169 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1170 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1171 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1172 smi_msg->data[i+6] = source_address;
1173 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1174 smi_msg->data[i+8] = msg->cmd;
1176 /* Now tack on the data to the message. */
1177 if (msg->data_len > 0)
1178 memcpy(&(smi_msg->data[i+9]), msg->data,
1180 smi_msg->data_size = msg->data_len + 9;
1182 /* Now calculate the checksum and tack it on. */
1183 smi_msg->data[i+smi_msg->data_size]
1184 = ipmb_checksum(&(smi_msg->data[i+6]),
1185 smi_msg->data_size-6);
1187 /* Add on the checksum size and the offset from the
1189 smi_msg->data_size += 1 + i;
1191 smi_msg->msgid = msgid;
1194 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1195 struct kernel_ipmi_msg *msg,
1196 struct ipmi_lan_addr *lan_addr,
1198 unsigned char ipmb_seq,
1199 unsigned char source_lun)
1201 /* Format the IPMB header data. */
1202 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1203 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1204 smi_msg->data[2] = lan_addr->channel;
1205 smi_msg->data[3] = lan_addr->session_handle;
1206 smi_msg->data[4] = lan_addr->remote_SWID;
1207 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1208 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1209 smi_msg->data[7] = lan_addr->local_SWID;
1210 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1211 smi_msg->data[9] = msg->cmd;
1213 /* Now tack on the data to the message. */
1214 if (msg->data_len > 0)
1215 memcpy(&(smi_msg->data[10]), msg->data,
1217 smi_msg->data_size = msg->data_len + 10;
1219 /* Now calculate the checksum and tack it on. */
1220 smi_msg->data[smi_msg->data_size]
1221 = ipmb_checksum(&(smi_msg->data[7]),
1222 smi_msg->data_size-7);
1224 /* Add on the checksum size and the offset from the
1226 smi_msg->data_size += 1;
1228 smi_msg->msgid = msgid;
1231 /* Separate from ipmi_request so that the user does not have to be
1232 supplied in certain circumstances (mainly at panic time). If
1233 messages are supplied, they will be freed, even if an error
1235 static int i_ipmi_request(ipmi_user_t user,
1237 struct ipmi_addr *addr,
1239 struct kernel_ipmi_msg *msg,
1240 void *user_msg_data,
1242 struct ipmi_recv_msg *supplied_recv,
1244 unsigned char source_address,
1245 unsigned char source_lun,
1247 unsigned int retry_time_ms)
1250 struct ipmi_smi_msg *smi_msg;
1251 struct ipmi_recv_msg *recv_msg;
1252 unsigned long flags;
1253 struct ipmi_smi_handlers *handlers;
1256 if (supplied_recv) {
1257 recv_msg = supplied_recv;
1259 recv_msg = ipmi_alloc_recv_msg();
1260 if (recv_msg == NULL) {
1264 recv_msg->user_msg_data = user_msg_data;
1267 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1269 smi_msg = ipmi_alloc_smi_msg();
1270 if (smi_msg == NULL) {
1271 ipmi_free_recv_msg(recv_msg);
1277 handlers = intf->handlers;
1283 recv_msg->user = user;
1285 kref_get(&user->refcount);
1286 recv_msg->msgid = msgid;
1287 /* Store the message to send in the receive message so timeout
1288 responses can get the proper response data. */
1289 recv_msg->msg = *msg;
1291 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1292 struct ipmi_system_interface_addr *smi_addr;
1294 if (msg->netfn & 1) {
1295 /* Responses are not allowed to the SMI. */
1300 smi_addr = (struct ipmi_system_interface_addr *) addr;
1301 if (smi_addr->lun > 3) {
1302 spin_lock_irqsave(&intf->counter_lock, flags);
1303 intf->sent_invalid_commands++;
1304 spin_unlock_irqrestore(&intf->counter_lock, flags);
1309 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1311 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1312 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1313 || (msg->cmd == IPMI_GET_MSG_CMD)
1314 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1316 /* We don't let the user do these, since we manage
1317 the sequence numbers. */
1318 spin_lock_irqsave(&intf->counter_lock, flags);
1319 intf->sent_invalid_commands++;
1320 spin_unlock_irqrestore(&intf->counter_lock, flags);
1325 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1326 spin_lock_irqsave(&intf->counter_lock, flags);
1327 intf->sent_invalid_commands++;
1328 spin_unlock_irqrestore(&intf->counter_lock, flags);
1333 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1334 smi_msg->data[1] = msg->cmd;
1335 smi_msg->msgid = msgid;
1336 smi_msg->user_data = recv_msg;
1337 if (msg->data_len > 0)
1338 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1339 smi_msg->data_size = msg->data_len + 2;
1340 spin_lock_irqsave(&intf->counter_lock, flags);
1341 intf->sent_local_commands++;
1342 spin_unlock_irqrestore(&intf->counter_lock, flags);
1343 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1344 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1346 struct ipmi_ipmb_addr *ipmb_addr;
1347 unsigned char ipmb_seq;
1351 if (addr->channel >= IPMI_MAX_CHANNELS) {
1352 spin_lock_irqsave(&intf->counter_lock, flags);
1353 intf->sent_invalid_commands++;
1354 spin_unlock_irqrestore(&intf->counter_lock, flags);
1359 if (intf->channels[addr->channel].medium
1360 != IPMI_CHANNEL_MEDIUM_IPMB)
1362 spin_lock_irqsave(&intf->counter_lock, flags);
1363 intf->sent_invalid_commands++;
1364 spin_unlock_irqrestore(&intf->counter_lock, flags);
1370 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1371 retries = 0; /* Don't retry broadcasts. */
1375 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1376 /* Broadcasts add a zero at the beginning of the
1377 message, but otherwise is the same as an IPMB
1379 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1384 /* Default to 1 second retries. */
1385 if (retry_time_ms == 0)
1386 retry_time_ms = 1000;
1388 /* 9 for the header and 1 for the checksum, plus
1389 possibly one for the broadcast. */
1390 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1391 spin_lock_irqsave(&intf->counter_lock, flags);
1392 intf->sent_invalid_commands++;
1393 spin_unlock_irqrestore(&intf->counter_lock, flags);
1398 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1399 if (ipmb_addr->lun > 3) {
1400 spin_lock_irqsave(&intf->counter_lock, flags);
1401 intf->sent_invalid_commands++;
1402 spin_unlock_irqrestore(&intf->counter_lock, flags);
1407 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1409 if (recv_msg->msg.netfn & 0x1) {
1410 /* It's a response, so use the user's sequence
1412 spin_lock_irqsave(&intf->counter_lock, flags);
1413 intf->sent_ipmb_responses++;
1414 spin_unlock_irqrestore(&intf->counter_lock, flags);
1415 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1417 source_address, source_lun);
1419 /* Save the receive message so we can use it
1420 to deliver the response. */
1421 smi_msg->user_data = recv_msg;
1423 /* It's a command, so get a sequence for it. */
1425 spin_lock_irqsave(&(intf->seq_lock), flags);
1427 spin_lock(&intf->counter_lock);
1428 intf->sent_ipmb_commands++;
1429 spin_unlock(&intf->counter_lock);
1431 /* Create a sequence number with a 1 second
1432 timeout and 4 retries. */
1433 rv = intf_next_seq(intf,
1441 /* We have used up all the sequence numbers,
1442 probably, so abort. */
1443 spin_unlock_irqrestore(&(intf->seq_lock),
1448 /* Store the sequence number in the message,
1449 so that when the send message response
1450 comes back we can start the timer. */
1451 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1452 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1453 ipmb_seq, broadcast,
1454 source_address, source_lun);
1456 /* Copy the message into the recv message data, so we
1457 can retransmit it later if necessary. */
1458 memcpy(recv_msg->msg_data, smi_msg->data,
1459 smi_msg->data_size);
1460 recv_msg->msg.data = recv_msg->msg_data;
1461 recv_msg->msg.data_len = smi_msg->data_size;
1463 /* We don't unlock until here, because we need
1464 to copy the completed message into the
1465 recv_msg before we release the lock.
1466 Otherwise, race conditions may bite us. I
1467 know that's pretty paranoid, but I prefer
1469 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1471 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1472 struct ipmi_lan_addr *lan_addr;
1473 unsigned char ipmb_seq;
1476 if (addr->channel >= IPMI_MAX_CHANNELS) {
1477 spin_lock_irqsave(&intf->counter_lock, flags);
1478 intf->sent_invalid_commands++;
1479 spin_unlock_irqrestore(&intf->counter_lock, flags);
1484 if ((intf->channels[addr->channel].medium
1485 != IPMI_CHANNEL_MEDIUM_8023LAN)
1486 && (intf->channels[addr->channel].medium
1487 != IPMI_CHANNEL_MEDIUM_ASYNC))
1489 spin_lock_irqsave(&intf->counter_lock, flags);
1490 intf->sent_invalid_commands++;
1491 spin_unlock_irqrestore(&intf->counter_lock, flags);
1498 /* Default to 1 second retries. */
1499 if (retry_time_ms == 0)
1500 retry_time_ms = 1000;
1502 /* 11 for the header and 1 for the checksum. */
1503 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1504 spin_lock_irqsave(&intf->counter_lock, flags);
1505 intf->sent_invalid_commands++;
1506 spin_unlock_irqrestore(&intf->counter_lock, flags);
1511 lan_addr = (struct ipmi_lan_addr *) addr;
1512 if (lan_addr->lun > 3) {
1513 spin_lock_irqsave(&intf->counter_lock, flags);
1514 intf->sent_invalid_commands++;
1515 spin_unlock_irqrestore(&intf->counter_lock, flags);
1520 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1522 if (recv_msg->msg.netfn & 0x1) {
1523 /* It's a response, so use the user's sequence
1525 spin_lock_irqsave(&intf->counter_lock, flags);
1526 intf->sent_lan_responses++;
1527 spin_unlock_irqrestore(&intf->counter_lock, flags);
1528 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1531 /* Save the receive message so we can use it
1532 to deliver the response. */
1533 smi_msg->user_data = recv_msg;
1535 /* It's a command, so get a sequence for it. */
1537 spin_lock_irqsave(&(intf->seq_lock), flags);
1539 spin_lock(&intf->counter_lock);
1540 intf->sent_lan_commands++;
1541 spin_unlock(&intf->counter_lock);
1543 /* Create a sequence number with a 1 second
1544 timeout and 4 retries. */
1545 rv = intf_next_seq(intf,
1553 /* We have used up all the sequence numbers,
1554 probably, so abort. */
1555 spin_unlock_irqrestore(&(intf->seq_lock),
1560 /* Store the sequence number in the message,
1561 so that when the send message response
1562 comes back we can start the timer. */
1563 format_lan_msg(smi_msg, msg, lan_addr,
1564 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1565 ipmb_seq, source_lun);
1567 /* Copy the message into the recv message data, so we
1568 can retransmit it later if necessary. */
1569 memcpy(recv_msg->msg_data, smi_msg->data,
1570 smi_msg->data_size);
1571 recv_msg->msg.data = recv_msg->msg_data;
1572 recv_msg->msg.data_len = smi_msg->data_size;
1574 /* We don't unlock until here, because we need
1575 to copy the completed message into the
1576 recv_msg before we release the lock.
1577 Otherwise, race conditions may bite us. I
1578 know that's pretty paranoid, but I prefer
1580 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1583 /* Unknown address type. */
1584 spin_lock_irqsave(&intf->counter_lock, flags);
1585 intf->sent_invalid_commands++;
1586 spin_unlock_irqrestore(&intf->counter_lock, flags);
1594 for (m = 0; m < smi_msg->data_size; m++)
1595 printk(" %2.2x", smi_msg->data[m]);
1600 handlers->sender(intf->send_info, smi_msg, priority);
1607 ipmi_free_smi_msg(smi_msg);
1608 ipmi_free_recv_msg(recv_msg);
/*
 * Validate the channel in a user-supplied IPMI address and look up the
 * local slave address (saddr) and LUN configured for that channel.
 * NOTE(review): interior lines are elided in this chunk; the error
 * return for an out-of-range channel is not visible here.
 */
1612 static int check_addr(ipmi_smi_t intf,
1613 struct ipmi_addr *addr,
1614 unsigned char *saddr,
/* The channel index must fit the per-interface channel table. */
1617 if (addr->channel >= IPMI_MAX_CHANNELS)
1619 *lun = intf->channels[addr->channel].lun;
1620 *saddr = intf->channels[addr->channel].address;
/*
 * Public entry point: send an IPMI request with caller-specified retry
 * count/time.  Validates the address via check_addr() and then forwards
 * everything to the internal i_ipmi_request() worker.
 */
1624 int ipmi_request_settime(ipmi_user_t user,
1625 struct ipmi_addr *addr,
1627 struct kernel_ipmi_msg *msg,
1628 void *user_msg_data,
1631 unsigned int retry_time_ms)
1633 unsigned char saddr, lun;
/* Resolve the local slave address/LUN for the target channel. */
1638 rv = check_addr(user->intf, addr, &saddr, &lun);
1641 return i_ipmi_request(user,
/*
 * Public entry point: like ipmi_request_settime(), but the caller
 * supplies pre-allocated message buffers (supplied_recv, and the SMI
 * message not visible in this chunk) so no allocation is needed on the
 * send path.  Forwards to i_ipmi_request().
 */
1655 int ipmi_request_supply_msgs(ipmi_user_t user,
1656 struct ipmi_addr *addr,
1658 struct kernel_ipmi_msg *msg,
1659 void *user_msg_data,
1661 struct ipmi_recv_msg *supplied_recv,
1664 unsigned char saddr, lun;
/* Resolve the local slave address/LUN for the target channel. */
1669 rv = check_addr(user->intf, addr, &saddr, &lun);
1672 return i_ipmi_request(user,
/*
 * Legacy procfs read handler for the "ipmb" file: prints the IPMB
 * slave address of every channel as space-separated hex values,
 * replacing the trailing space with a newline.
 */
1686 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1687 int count, int *eof, void *data)
1689 char *out = (char *) page;
1690 ipmi_smi_t intf = data;
1694 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1695 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1696 out[rv-1] = '\n'; /* Replace the final space with a newline */
/*
 * Legacy procfs read handler for the "version" file: prints the BMC's
 * IPMI version as "major.minor".
 */
1702 static int version_file_read_proc(char *page, char **start, off_t off,
1703 int count, int *eof, void *data)
1705 char *out = (char *) page;
1706 ipmi_smi_t intf = data;
1708 return sprintf(out, "%d.%d\n",
1709 ipmi_version_major(&intf->bmc->id),
1710 ipmi_version_minor(&intf->bmc->id));
/*
 * Legacy procfs read handler for the "stats" file: dumps every
 * per-interface message counter, one "name: value" line each.
 * The off/count/eof parameters are unused in the visible code, i.e.
 * the output is assumed to fit in a single page.
 */
1713 static int stat_file_read_proc(char *page, char **start, off_t off,
1714 int count, int *eof, void *data)
1716 char *out = (char *) page;
1717 ipmi_smi_t intf = data;
1719 out += sprintf(out, "sent_invalid_commands: %d\n",
1720 intf->sent_invalid_commands);
1721 out += sprintf(out, "sent_local_commands: %d\n",
1722 intf->sent_local_commands);
1723 out += sprintf(out, "handled_local_responses: %d\n",
1724 intf->handled_local_responses);
1725 out += sprintf(out, "unhandled_local_responses: %d\n",
1726 intf->unhandled_local_responses);
1727 out += sprintf(out, "sent_ipmb_commands: %d\n",
1728 intf->sent_ipmb_commands);
1729 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1730 intf->sent_ipmb_command_errs);
1731 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1732 intf->retransmitted_ipmb_commands);
1733 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1734 intf->timed_out_ipmb_commands);
1735 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1736 intf->timed_out_ipmb_broadcasts);
1737 out += sprintf(out, "sent_ipmb_responses: %d\n",
1738 intf->sent_ipmb_responses);
1739 out += sprintf(out, "handled_ipmb_responses: %d\n",
1740 intf->handled_ipmb_responses);
1741 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1742 intf->invalid_ipmb_responses);
1743 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1744 intf->unhandled_ipmb_responses);
1745 out += sprintf(out, "sent_lan_commands: %d\n",
1746 intf->sent_lan_commands);
1747 out += sprintf(out, "sent_lan_command_errs: %d\n",
1748 intf->sent_lan_command_errs);
1749 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1750 intf->retransmitted_lan_commands);
1751 out += sprintf(out, "timed_out_lan_commands: %d\n",
1752 intf->timed_out_lan_commands);
1753 out += sprintf(out, "sent_lan_responses: %d\n",
1754 intf->sent_lan_responses);
1755 out += sprintf(out, "handled_lan_responses: %d\n",
1756 intf->handled_lan_responses);
1757 out += sprintf(out, "invalid_lan_responses: %d\n",
1758 intf->invalid_lan_responses);
1759 out += sprintf(out, "unhandled_lan_responses: %d\n",
1760 intf->unhandled_lan_responses);
1761 out += sprintf(out, "handled_commands: %d\n",
1762 intf->handled_commands);
1763 out += sprintf(out, "invalid_commands: %d\n",
1764 intf->invalid_commands);
1765 out += sprintf(out, "unhandled_commands: %d\n",
1766 intf->unhandled_commands);
1767 out += sprintf(out, "invalid_events: %d\n",
1768 intf->invalid_events);
1769 out += sprintf(out, "events: %d\n",
/* Number of bytes produced. */
1772 return (out - ((char *) page));
/*
 * Create a proc file under this interface's proc directory and record
 * it on the interface's proc_entries list (protected by
 * proc_entry_lock) so remove_proc_entries() can tear it down later.
 * The entry's name is duplicated because remove_proc_entry() needs it
 * at removal time.  Compiled out entirely without CONFIG_PROC_FS.
 * NOTE(review): the kmalloc failure paths are elided in this chunk.
 */
1775 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1776 read_proc_t *read_proc, write_proc_t *write_proc,
1777 void *data, struct module *owner)
1780 #ifdef CONFIG_PROC_FS
1781 struct proc_dir_entry *file;
1782 struct ipmi_proc_entry *entry;
1784 /* Create a list element. */
1785 entry = kmalloc(sizeof(*entry), GFP_KERNEL)
1788 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1793 strcpy(entry->name, name);
1795 file = create_proc_entry(name, 0, smi->proc_dir);
1803 file->read_proc = read_proc;
1804 file->write_proc = write_proc;
1805 file->owner = owner;
1807 spin_lock(&smi->proc_entry_lock);
1808 /* Stick it on the list. */
1809 entry->next = smi->proc_entries;
1810 smi->proc_entries = entry;
1811 spin_unlock(&smi->proc_entry_lock);
1813 #endif /* CONFIG_PROC_FS */
/*
 * Create the per-interface proc directory ("/proc/ipmi/<num>") and
 * populate it with the "stats", "ipmb" and "version" files.
 * No-op without CONFIG_PROC_FS.
 */
1818 static int add_proc_entries(ipmi_smi_t smi, int num)
1822 #ifdef CONFIG_PROC_FS
/* Directory is named after the interface number. */
1823 sprintf(smi->proc_dir_name, "%d", num);
1824 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1828 smi->proc_dir->owner = THIS_MODULE;
1832 rv = ipmi_smi_add_proc_entry(smi, "stats",
1833 stat_file_read_proc, NULL,
1837 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1838 ipmb_file_read_proc, NULL,
1842 rv = ipmi_smi_add_proc_entry(smi, "version",
1843 version_file_read_proc, NULL,
1845 #endif /* CONFIG_PROC_FS */
/*
 * Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry()
 * created: pop each entry off the interface's list under
 * proc_entry_lock, remove its proc file, then remove the per-interface
 * directory itself.  No-op without CONFIG_PROC_FS.
 */
1850 static void remove_proc_entries(ipmi_smi_t smi)
1852 #ifdef CONFIG_PROC_FS
1853 struct ipmi_proc_entry *entry;
1855 spin_lock(&smi->proc_entry_lock);
1856 while (smi->proc_entries) {
1857 entry = smi->proc_entries;
1858 smi->proc_entries = entry->next;
1860 remove_proc_entry(entry->name, smi->proc_dir);
1864 spin_unlock(&smi->proc_entry_lock);
1865 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1866 #endif /* CONFIG_PROC_FS */
/*
 * driver_find_device() match callback: true if the bmc_device bound to
 * this device has the 16-byte GUID passed in via @data.
 */
1869 static int __find_bmc_guid(struct device *dev, void *data)
1871 unsigned char *id = data;
1872 struct bmc_device *bmc = dev_get_drvdata(dev);
1873 return memcmp(bmc->guid, id, 16) == 0;
/*
 * Look up an already-registered BMC by GUID on the given driver.
 * Returns its bmc_device, or (per the elided branch) presumably NULL
 * when no device matches — TODO confirm against the full source.
 */
1876 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1877 unsigned char *guid)
1881 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1883 return dev_get_drvdata(dev);
/* Key used to match a BMC by its (product id, device id) pair. */
1888 struct prod_dev_id {
1889 unsigned int product_id;
1890 unsigned char device_id;
/*
 * driver_find_device() match callback: true if the bmc_device bound to
 * this device has the product id and device id given in @data.
 */
1893 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1895 struct prod_dev_id *id = data;
1896 struct bmc_device *bmc = dev_get_drvdata(dev);
1898 return (bmc->id.product_id == id->product_id
1899 && bmc->id.device_id == id->device_id);
/*
 * Look up an already-registered BMC by its (product id, device id)
 * pair on the given driver; used both to share one bmc_device between
 * interfaces and to detect duplicate ids at registration time.
 */
1902 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1903 struct device_driver *drv,
1904 unsigned int product_id, unsigned char device_id)
1906 struct prod_dev_id id = {
1907 .product_id = product_id,
1908 .device_id = device_id,
1912 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1914 return dev_get_drvdata(dev);
/* sysfs "device_id" attribute: BMC device id as decimal. */
1919 static ssize_t device_id_show(struct device *dev,
1920 struct device_attribute *attr,
1923 struct bmc_device *bmc = dev_get_drvdata(dev);
1925 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
/*
 * sysfs "provides_device_sdrs" attribute: bit 7 of the device
 * revision byte, shifted down to 0/1.
 */
1928 static ssize_t provides_dev_sdrs_show(struct device *dev,
1929 struct device_attribute *attr,
1932 struct bmc_device *bmc = dev_get_drvdata(dev);
1934 return snprintf(buf, 10, "%u\n",
1935 (bmc->id.device_revision & 0x80) >> 7);
/* sysfs "revision" attribute: low nibble of the device revision. */
1938 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1941 struct bmc_device *bmc = dev_get_drvdata(dev);
1943 return snprintf(buf, 20, "%u\n",
1944 bmc->id.device_revision & 0x0F);
/*
 * sysfs "firmware_revision" attribute: "major.minor", with the minor
 * part printed in hex (it is BCD-encoded per IPMI Get Device ID).
 */
1947 static ssize_t firmware_rev_show(struct device *dev,
1948 struct device_attribute *attr,
1951 struct bmc_device *bmc = dev_get_drvdata(dev);
1953 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1954 bmc->id.firmware_revision_2);
/* sysfs "ipmi_version" attribute: spec version as "major.minor". */
1957 static ssize_t ipmi_version_show(struct device *dev,
1958 struct device_attribute *attr,
1961 struct bmc_device *bmc = dev_get_drvdata(dev);
1963 return snprintf(buf, 20, "%u.%u\n",
1964 ipmi_version_major(&bmc->id),
1965 ipmi_version_minor(&bmc->id));
/*
 * sysfs "additional_device_support" attribute: the raw support
 * bitmask byte, printed as 0xNN.
 */
1968 static ssize_t add_dev_support_show(struct device *dev,
1969 struct device_attribute *attr,
1972 struct bmc_device *bmc = dev_get_drvdata(dev);
1974 return snprintf(buf, 10, "0x%02x\n",
1975 bmc->id.additional_device_support);
/* sysfs "manufacturer_id" attribute: 24-bit IANA id as 0xNNNNNN. */
1978 static ssize_t manufacturer_id_show(struct device *dev,
1979 struct device_attribute *attr,
1982 struct bmc_device *bmc = dev_get_drvdata(dev);
1984 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/* sysfs "product_id" attribute: 16-bit product id as 0xNNNN. */
1987 static ssize_t product_id_show(struct device *dev,
1988 struct device_attribute *attr,
1991 struct bmc_device *bmc = dev_get_drvdata(dev);
1993 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/*
 * sysfs "aux_firmware_revision" attribute: the four auxiliary
 * firmware revision bytes, printed most-significant first.
 */
1996 static ssize_t aux_firmware_rev_show(struct device *dev,
1997 struct device_attribute *attr,
2000 struct bmc_device *bmc = dev_get_drvdata(dev);
2002 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2003 bmc->id.aux_firmware_revision[3],
2004 bmc->id.aux_firmware_revision[2],
2005 bmc->id.aux_firmware_revision[1],
2006 bmc->id.aux_firmware_revision[0]);
/*
 * sysfs "guid" attribute.
 * NOTE(review): only bytes 0 and 8 of the 16-byte GUID are printed
 * (each cast to long long); this does not emit the full GUID.  The
 * behavior is preserved here — verify against the current upstream
 * implementation before relying on this output format.
 */
2009 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2012 struct bmc_device *bmc = dev_get_drvdata(dev);
2014 return snprintf(buf, 100, "%Lx%Lx\n",
2015 (long long) bmc->guid[0],
2016 (long long) bmc->guid[8]);
/*
 * Remove every sysfs attribute file that create_files() created on
 * the BMC's platform device.  The aux-firmware-revision file (and,
 * per the visible tail, the GUID file) are conditional, mirroring
 * the conditional creation.
 */
2019 static void remove_files(struct bmc_device *bmc)
2024 device_remove_file(&bmc->dev->dev,
2025 &bmc->device_id_attr);
2026 device_remove_file(&bmc->dev->dev,
2027 &bmc->provides_dev_sdrs_attr);
2028 device_remove_file(&bmc->dev->dev,
2029 &bmc->revision_attr);
2030 device_remove_file(&bmc->dev->dev,
2031 &bmc->firmware_rev_attr);
2032 device_remove_file(&bmc->dev->dev,
2033 &bmc->version_attr);
2034 device_remove_file(&bmc->dev->dev,
2035 &bmc->add_dev_support_attr);
2036 device_remove_file(&bmc->dev->dev,
2037 &bmc->manufacturer_id_attr);
2038 device_remove_file(&bmc->dev->dev,
2039 &bmc->product_id_attr);
/* Only created when the BMC reported an aux firmware revision. */
2041 if (bmc->id.aux_firmware_revision_set)
2042 device_remove_file(&bmc->dev->dev,
2043 &bmc->aux_firmware_rev_attr);
2045 device_remove_file(&bmc->dev->dev,
/*
 * kref release callback for a bmc_device: runs when the last
 * reference is dropped and unregisters the backing platform device.
 */
2050 cleanup_bmc_device(struct kref *ref)
2052 struct bmc_device *bmc;
2054 bmc = container_of(ref, struct bmc_device, refcount);
2058 platform_device_unregister(bmc->dev);
/*
 * Undo ipmi_bmc_register() for one interface: remove the two sysfs
 * symlinks (interface -> bmc and bmc -> interface) and drop this
 * interface's reference on the (possibly shared) bmc_device under
 * ipmidriver_mutex; the last ref triggers cleanup_bmc_device().
 */
2062 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2064 struct bmc_device *bmc = intf->bmc;
2066 if (intf->sysfs_name) {
2067 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2068 kfree(intf->sysfs_name);
2069 intf->sysfs_name = NULL;
2071 if (intf->my_dev_name) {
2072 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2073 kfree(intf->my_dev_name);
2074 intf->my_dev_name = NULL;
2077 mutex_lock(&ipmidriver_mutex);
2078 kref_put(&bmc->refcount, cleanup_bmc_device);
2080 mutex_unlock(&ipmidriver_mutex);
/*
 * Initialize each device_attribute (name, owner, mode, show handler)
 * on the bmc_device and create the corresponding read-only sysfs
 * files on its platform device.  The aux-firmware-revision and GUID
 * files are created only when the BMC actually supplied that data.
 * On any failure, previously created files are removed via the
 * fall-through labeled cleanup at the end.
 */
2083 static int create_files(struct bmc_device *bmc)
2087 bmc->device_id_attr.attr.name = "device_id";
2088 bmc->device_id_attr.attr.owner = THIS_MODULE;
2089 bmc->device_id_attr.attr.mode = S_IRUGO;
2090 bmc->device_id_attr.show = device_id_show;
2092 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2093 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2094 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2095 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2097 bmc->revision_attr.attr.name = "revision";
2098 bmc->revision_attr.attr.owner = THIS_MODULE;
2099 bmc->revision_attr.attr.mode = S_IRUGO;
2100 bmc->revision_attr.show = revision_show;
2102 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2103 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2104 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2105 bmc->firmware_rev_attr.show = firmware_rev_show;
2107 bmc->version_attr.attr.name = "ipmi_version";
2108 bmc->version_attr.attr.owner = THIS_MODULE;
2109 bmc->version_attr.attr.mode = S_IRUGO;
2110 bmc->version_attr.show = ipmi_version_show;
2112 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2113 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2114 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2115 bmc->add_dev_support_attr.show = add_dev_support_show;
2117 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2118 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2119 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2120 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2122 bmc->product_id_attr.attr.name = "product_id";
2123 bmc->product_id_attr.attr.owner = THIS_MODULE;
2124 bmc->product_id_attr.attr.mode = S_IRUGO;
2125 bmc->product_id_attr.show = product_id_show;
2127 bmc->guid_attr.attr.name = "guid";
2128 bmc->guid_attr.attr.owner = THIS_MODULE;
2129 bmc->guid_attr.attr.mode = S_IRUGO;
2130 bmc->guid_attr.show = guid_show;
2132 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2133 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2134 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2135 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
/* Create the files; each failure unwinds all earlier creations. */
2137 err = device_create_file(&bmc->dev->dev,
2138 &bmc->device_id_attr);
2140 err = device_create_file(&bmc->dev->dev,
2141 &bmc->provides_dev_sdrs_attr);
2142 if (err) goto out_devid;
2143 err = device_create_file(&bmc->dev->dev,
2144 &bmc->revision_attr);
2145 if (err) goto out_sdrs;
2146 err = device_create_file(&bmc->dev->dev,
2147 &bmc->firmware_rev_attr);
2148 if (err) goto out_rev;
2149 err = device_create_file(&bmc->dev->dev,
2150 &bmc->version_attr);
2151 if (err) goto out_firm;
2152 err = device_create_file(&bmc->dev->dev,
2153 &bmc->add_dev_support_attr);
2154 if (err) goto out_version;
2155 err = device_create_file(&bmc->dev->dev,
2156 &bmc->manufacturer_id_attr);
2157 if (err) goto out_add_dev;
2158 err = device_create_file(&bmc->dev->dev,
2159 &bmc->product_id_attr);
2160 if (err) goto out_manu;
2161 if (bmc->id.aux_firmware_revision_set) {
2162 err = device_create_file(&bmc->dev->dev,
2163 &bmc->aux_firmware_rev_attr);
2164 if (err) goto out_prod_id;
2166 if (bmc->guid_set) {
2167 err = device_create_file(&bmc->dev->dev,
2169 if (err) goto out_aux_firm;
/* Error unwind: remove files in reverse order of creation. */
2175 if (bmc->id.aux_firmware_revision_set)
2176 device_remove_file(&bmc->dev->dev,
2177 &bmc->aux_firmware_rev_attr);
2179 device_remove_file(&bmc->dev->dev,
2180 &bmc->product_id_attr);
2182 device_remove_file(&bmc->dev->dev,
2183 &bmc->manufacturer_id_attr);
2185 device_remove_file(&bmc->dev->dev,
2186 &bmc->add_dev_support_attr);
2188 device_remove_file(&bmc->dev->dev,
2189 &bmc->version_attr);
2191 device_remove_file(&bmc->dev->dev,
2192 &bmc->firmware_rev_attr);
2194 device_remove_file(&bmc->dev->dev,
2195 &bmc->revision_attr);
2197 device_remove_file(&bmc->dev->dev,
2198 &bmc->provides_dev_sdrs_attr);
2200 device_remove_file(&bmc->dev->dev,
2201 &bmc->device_id_attr);
/*
 * Register (or share) the bmc_device behind an interface:
 *  - Under ipmidriver_mutex, look for an existing BMC with the same
 *    GUID, then by (product id, device id).  If found, free the new
 *    one, point intf->bmc at the old one and take a reference.
 *  - Otherwise allocate a platform device "ipmi_bmc.<prod_id>".  If
 *    another BMC already claims the same (product, device) id pair,
 *    increment the device id (wrapping at 255) until it is unique —
 *    a firmware-bug workaround, warned about once.
 *  - Add the platform device, create the sysfs attribute files, and
 *    create the cross symlinks: interface -> bmc (intf->sysfs_name)
 *    and bmc -> interface ("ipmi<N>", intf->my_dev_name).
 * NOTE(review): several error-return and goto lines are elided in
 * this chunk; the visible cleanup tail calls ipmi_bmc_unregister().
 */
2206 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2207 const char *sysfs_name)
2210 struct bmc_device *bmc = intf->bmc;
2211 struct bmc_device *old_bmc;
2215 mutex_lock(&ipmidriver_mutex);
2218 * Try to find if there is an bmc_device struct
2219 * representing the interfaced BMC already
2222 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2224 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2229 * If there is already an bmc_device, free the new one,
2230 * otherwise register the new BMC device
2234 intf->bmc = old_bmc;
2237 kref_get(&bmc->refcount);
2238 mutex_unlock(&ipmidriver_mutex);
2241 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2242 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2243 bmc->id.manufacturer_id,
2248 unsigned char orig_dev_id = bmc->id.device_id;
2249 int warn_printed = 0;
2251 snprintf(name, sizeof(name),
2252 "ipmi_bmc.%4.4x", bmc->id.product_id);
/* Firmware-bug workaround: find an unused device id. */
2254 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2258 if (!warn_printed) {
2259 printk(KERN_WARNING PFX
2260 "This machine has two different BMCs"
2261 " with the same product id and device"
2262 " id. This is an error in the"
2263 " firmware, but incrementing the"
2264 " device id to work around the problem."
2265 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2266 bmc->id.product_id, bmc->id.device_id);
2269 bmc->id.device_id++; /* Wraps at 255 */
2270 if (bmc->id.device_id == orig_dev_id) {
2272 "Out of device ids!\n");
2277 bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2279 mutex_unlock(&ipmidriver_mutex);
2282 " Unable to allocate platform device\n");
2285 bmc->dev->dev.driver = &ipmidriver;
2286 dev_set_drvdata(&bmc->dev->dev, bmc);
2287 kref_init(&bmc->refcount);
2289 rv = platform_device_add(bmc->dev);
2290 mutex_unlock(&ipmidriver_mutex);
2292 platform_device_put(bmc->dev);
2296 " Unable to register bmc device: %d\n",
2298 /* Don't go to out_err, you can only do that if
2299 the device is registered already. */
2303 rv = create_files(bmc);
2305 mutex_lock(&ipmidriver_mutex);
2306 platform_device_unregister(bmc->dev);
2307 mutex_unlock(&ipmidriver_mutex);
2313 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2314 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2315 bmc->id.manufacturer_id,
2321 * create symlink from system interface device to bmc device
2324 intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2325 if (!intf->sysfs_name) {
2328 "ipmi_msghandler: allocate link to BMC: %d\n",
2333 rv = sysfs_create_link(&intf->si_dev->kobj,
2334 &bmc->dev->dev.kobj, intf->sysfs_name);
2336 kfree(intf->sysfs_name);
2337 intf->sysfs_name = NULL;
2339 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
/* Size the "ipmi<N>" name with a probing snprintf. */
2344 size = snprintf(dummy, 0, "ipmi%d", ifnum);
2345 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2346 if (!intf->my_dev_name) {
2347 kfree(intf->sysfs_name);
2348 intf->sysfs_name = NULL;
2351 "ipmi_msghandler: allocate link from BMC: %d\n",
2355 snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2357 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2360 kfree(intf->sysfs_name);
2361 intf->sysfs_name = NULL;
2362 kfree(intf->my_dev_name);
2363 intf->my_dev_name = NULL;
2366 " Unable to create symlink to bmc: %d\n",
2374 ipmi_bmc_unregister(intf);
/*
 * Issue a Get Device GUID command to the BMC over the system
 * interface (channel IPMI_BMC_CHANNEL) via i_ipmi_request(); the
 * response is handled by guid_handler() through the null-user path.
 */
2379 send_guid_cmd(ipmi_smi_t intf, int chan)
2381 struct kernel_ipmi_msg msg;
2382 struct ipmi_system_interface_addr si;
2384 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2385 si.channel = IPMI_BMC_CHANNEL;
2388 msg.netfn = IPMI_NETFN_APP_REQUEST;
2389 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2392 return i_ipmi_request(NULL,
2394 (struct ipmi_addr *) &si,
2401 intf->channels[0].address,
2402 intf->channels[0].lun,
/*
 * Null-user handler for the Get Device GUID response.  Ignores
 * messages that are not that response; clears guid_set on an error
 * completion code or a short (< 17 byte) response; otherwise copies
 * the 16 GUID bytes and sets guid_set.  Wakes the waiter in
 * get_guid() via intf->waitq.
 */
2407 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2409 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2410 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2411 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2415 if (msg->msg.data[0] != 0) {
2416 /* Error from getting the GUID, the BMC doesn't have one. */
2417 intf->bmc->guid_set = 0;
/* 1 completion-code byte + 16 GUID bytes expected. */
2421 if (msg->msg.data_len < 17) {
2422 intf->bmc->guid_set = 0;
2423 printk(KERN_WARNING PFX
2424 "guid_handler: The GUID response from the BMC was too"
2425 " short, it was %d but should have been 17. Assuming"
2426 " GUID is not available.\n",
2431 memcpy(intf->bmc->guid, msg->msg.data, 16);
2432 intf->bmc->guid_set = 1;
2434 wake_up(&intf->waitq);
/*
 * Synchronously fetch the BMC's GUID: guid_set is set to the sentinel
 * 2 ("in progress"), guid_handler() is installed as the null-user
 * handler, the command is sent, and we block on intf->waitq until the
 * handler resolves guid_set to 0 or 1.
 */
2438 get_guid(ipmi_smi_t intf)
2442 intf->bmc->guid_set = 0x2;
2443 intf->null_user_handler = guid_handler;
2444 rv = send_guid_cmd(intf, 0);
2446 /* Send failed, no GUID available. */
2447 intf->bmc->guid_set = 0;
2448 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2449 intf->null_user_handler = NULL;
/*
 * Issue a Get Channel Info command for channel @chan to the BMC over
 * the system interface via i_ipmi_request(); the response is handled
 * by channel_handler() through the null-user path.
 */
2453 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2455 struct kernel_ipmi_msg msg;
2456 unsigned char data[1];
2457 struct ipmi_system_interface_addr si;
2459 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2460 si.channel = IPMI_BMC_CHANNEL;
2463 msg.netfn = IPMI_NETFN_APP_REQUEST;
2464 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2468 return i_ipmi_request(NULL,
2470 (struct ipmi_addr *) &si,
2477 intf->channels[0].address,
2478 intf->channels[0].lun,
/*
 * Null-user handler driving the channel scan started at interface
 * registration.  For each Get Channel Info response it records the
 * channel's medium and protocol, then requests the next channel.
 * The scan terminates (curr_channel set to IPMI_MAX_CHANNELS and the
 * waiter woken) on the last channel, on a send error, or on a BMC
 * error code; IPMI_INVALID_COMMAND_ERR is treated as "BMC has no
 * such command" and channel 0 is assumed to be plain IPMB.
 */
2483 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2488 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2489 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2490 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2492 /* It's the one we want */
2493 if (msg->msg.data[0] != 0) {
2494 /* Got an error from the channel, just go on. */
2496 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2497 /* If the MC does not support this
2498 command, that is legal. We just
2499 assume it has one IPMB at channel
2501 intf->channels[0].medium
2502 = IPMI_CHANNEL_MEDIUM_IPMB;
2503 intf->channels[0].protocol
2504 = IPMI_CHANNEL_PROTOCOL_IPMB;
/* End the scan and release the waiter in ipmi_register_smi(). */
2507 intf->curr_channel = IPMI_MAX_CHANNELS;
2508 wake_up(&intf->waitq);
2513 if (msg->msg.data_len < 4) {
2514 /* Message not big enough, just go on. */
2517 chan = intf->curr_channel;
/* Per IPMI spec: data[2] = medium (7 bits), data[3] = protocol (5 bits). */
2518 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2519 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2522 intf->curr_channel++;
2523 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2524 wake_up(&intf->waitq);
2526 rv = send_channel_info_cmd(intf, intf->curr_channel);
2529 /* Got an error somehow, just give up. */
2530 intf->curr_channel = IPMI_MAX_CHANNELS;
2531 wake_up(&intf->waitq);
2533 printk(KERN_WARNING PFX
2534 "Error sending channel information: %d\n",
/*
 * Register a new low-level (SMI) interface with the message handler.
 * Allocates and initializes the per-interface state (channels default
 * to the BMC slave address with LUN 2, seq table, waiting-message and
 * event lists, locks), inserts the interface into ipmi_interfaces at
 * the lowest free interface number, starts the SMI's processing, then
 * — for IPMI >= 1.5 — scans the channels via send_channel_info_cmd()/
 * channel_handler() (blocking on intf->waitq until done), creates the
 * proc entries and registers the BMC device.  On failure the
 * interface is unlinked again and its reference dropped.
 * NOTE(review): many error-path lines (rv checks, gotos, unlock
 * ordering) are elided in this chunk.
 */
2542 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2544 struct ipmi_device_id *device_id,
2545 struct device *si_dev,
2546 const char *sysfs_name,
2547 unsigned char slave_addr)
2553 struct list_head *link;
2555 /* Make sure the driver is actually initialized, this handles
2556 problems with initialization order. */
2558 rv = ipmi_init_msghandler();
2561 /* The init code doesn't return an error if it was turned
2562 off, but it won't initialize. Check that. */
2567 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2570 memset(intf, 0, sizeof(*intf));
2572 intf->ipmi_version_major = ipmi_version_major(device_id);
2573 intf->ipmi_version_minor = ipmi_version_minor(device_id);
2575 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2580 intf->intf_num = -1; /* Mark it invalid for now. */
2581 kref_init(&intf->refcount);
2582 intf->bmc->id = *device_id;
2583 intf->si_dev = si_dev;
2584 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2585 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2586 intf->channels[j].lun = 2;
2588 if (slave_addr != 0)
2589 intf->channels[0].address = slave_addr;
2590 INIT_LIST_HEAD(&intf->users);
2591 intf->handlers = handlers;
2592 intf->send_info = send_info;
2593 spin_lock_init(&intf->seq_lock);
2594 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2595 intf->seq_table[j].inuse = 0;
2596 intf->seq_table[j].seqid = 0;
2599 #ifdef CONFIG_PROC_FS
2600 spin_lock_init(&intf->proc_entry_lock);
2602 spin_lock_init(&intf->waiting_msgs_lock);
2603 INIT_LIST_HEAD(&intf->waiting_msgs);
2604 spin_lock_init(&intf->events_lock);
2605 INIT_LIST_HEAD(&intf->waiting_events);
2606 intf->waiting_events_count = 0;
2607 mutex_init(&intf->cmd_rcvrs_mutex);
2608 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2609 init_waitqueue_head(&intf->waitq);
2611 spin_lock_init(&intf->counter_lock);
2612 intf->proc_dir = NULL;
2614 mutex_lock(&smi_watchers_mutex);
2615 mutex_lock(&ipmi_interfaces_mutex);
2616 /* Look for a hole in the numbers. */
2618 link = &ipmi_interfaces;
2619 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2620 if (tintf->intf_num != i) {
2621 link = &tintf->link;
2626 /* Add the new interface in numeric order. */
2628 list_add_rcu(&intf->link, &ipmi_interfaces);
2630 list_add_tail_rcu(&intf->link, link);
2632 rv = handlers->start_processing(send_info, intf);
/* Channel scan is only meaningful on IPMI 1.5 and later. */
2638 if ((intf->ipmi_version_major > 1)
2639 || ((intf->ipmi_version_major == 1)
2640 && (intf->ipmi_version_minor >= 5)))
2642 /* Start scanning the channels to see what is
2644 intf->null_user_handler = channel_handler;
2645 intf->curr_channel = 0;
2646 rv = send_channel_info_cmd(intf, 0);
2650 /* Wait for the channel info to be read. */
2651 wait_event(intf->waitq,
2652 intf->curr_channel >= IPMI_MAX_CHANNELS);
2653 intf->null_user_handler = NULL;
2655 /* Assume a single IPMB channel at zero. */
2656 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2657 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2661 rv = add_proc_entries(intf, i);
2663 rv = ipmi_bmc_register(intf, i, sysfs_name);
/* Error unwind: undo the partial registration. */
2668 remove_proc_entries(intf);
2669 intf->handlers = NULL;
2670 list_del_rcu(&intf->link);
2671 mutex_unlock(&ipmi_interfaces_mutex);
2672 mutex_unlock(&smi_watchers_mutex);
2674 kref_put(&intf->refcount, intf_free);
2676 /* After this point the interface is legal to use. */
2678 mutex_unlock(&ipmi_interfaces_mutex);
2679 call_smi_watchers(i, intf->si_dev);
2680 mutex_unlock(&smi_watchers_mutex);
/*
 * Fail every outstanding sequenced message on a dying interface by
 * delivering an IPMI_ERR_UNSPECIFIED error response for each in-use
 * seq_table entry.  Runs lock-free: the interface is already down.
 */
2686 static void cleanup_smi_msgs(ipmi_smi_t intf)
2689 struct seq_table *ent;
2691 /* No need for locks, the interface is down. */
2692 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2693 ent = &(intf->seq_table[i]);
2696 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
/*
 * Tear down an interface registered with ipmi_register_smi():
 * unregister the BMC device, mark the interface invalid and unlink it
 * from ipmi_interfaces (under both mutexes), fail pending sequenced
 * messages, remove the proc entries, notify every SMI watcher that
 * the interface number is gone, and drop the registration reference.
 */
2700 int ipmi_unregister_smi(ipmi_smi_t intf)
2702 struct ipmi_smi_watcher *w;
2703 int intf_num = intf->intf_num;
2705 ipmi_bmc_unregister(intf);
2707 mutex_lock(&smi_watchers_mutex);
2708 mutex_lock(&ipmi_interfaces_mutex);
2709 intf->intf_num = -1;
2710 intf->handlers = NULL;
2711 list_del_rcu(&intf->link);
2712 mutex_unlock(&ipmi_interfaces_mutex);
2715 cleanup_smi_msgs(intf);
2717 remove_proc_entries(intf);
2719 /* Call all the watcher interfaces to tell them that
2720 an interface is gone. */
2721 list_for_each_entry(w, &smi_watchers, link)
2722 w->smi_gone(intf_num);
2723 mutex_unlock(&smi_watchers_mutex);
2725 kref_put(&intf->refcount, intf_free);
/*
 * Handle a Get Message response carrying an IPMB response from a
 * remote entity.  Validates the minimum size (11 bytes: header plus
 * a completion code) and the completion code, reconstructs the IPMB
 * address from the response bytes, matches it to a pending request
 * via intf_find_seq() (the sequence number was stashed in the msgid),
 * then fills in the recv_msg and delivers it to the requesting user.
 * Unmatched or malformed responses only bump the relevant counters.
 */
2729 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2730 struct ipmi_smi_msg *msg)
2732 struct ipmi_ipmb_addr ipmb_addr;
2733 struct ipmi_recv_msg *recv_msg;
2734 unsigned long flags;
2737 /* This is 11, not 10, because the response must contain a
2738 * completion code. */
2739 if (msg->rsp_size < 11) {
2740 /* Message not big enough, just ignore it. */
2741 spin_lock_irqsave(&intf->counter_lock, flags);
2742 intf->invalid_ipmb_responses++;
2743 spin_unlock_irqrestore(&intf->counter_lock, flags);
2747 if (msg->rsp[2] != 0) {
2748 /* An error getting the response, just ignore it. */
/* Rebuild the remote responder's IPMB address from the response. */
2752 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2753 ipmb_addr.slave_addr = msg->rsp[6];
2754 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2755 ipmb_addr.lun = msg->rsp[7] & 3;
2757 /* It's a response from a remote entity. Look up the sequence
2758 number and handle the response. */
2759 if (intf_find_seq(intf,
2763 (msg->rsp[4] >> 2) & (~1),
2764 (struct ipmi_addr *) &(ipmb_addr),
2767 /* We were unable to find the sequence number,
2768 so just nuke the message. */
2769 spin_lock_irqsave(&intf->counter_lock, flags);
2770 intf->unhandled_ipmb_responses++;
2771 spin_unlock_irqrestore(&intf->counter_lock, flags);
2775 memcpy(recv_msg->msg_data,
2778 /* The other fields matched, so no need to set them, except
2779 for netfn, which needs to be the response that was
2780 returned, not the request value. */
2781 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2782 recv_msg->msg.data = recv_msg->msg_data;
2783 recv_msg->msg.data_len = msg->rsp_size - 10;
2784 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2785 spin_lock_irqsave(&intf->counter_lock, flags);
2786 intf->handled_ipmb_responses++;
2787 spin_unlock_irqrestore(&intf->counter_lock, flags);
2788 deliver_response(recv_msg);
/*
 * Handle an incoming command addressed to us over an IPMB channel.
 * Looks up a registered command receiver for (netfn, cmd, channel); if
 * none exists, synthesizes an "invalid command" IPMB response and sends
 * it back via the SMI.  Otherwise allocates a receive message, fills in
 * the source address and command fields, and delivers it to the user.
 * NOTE(review): elided lines include the early returns, the rcu lock
 * around find_cmd_rcvr, and the cmd assignment from the response bytes.
 */
2793 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2794 struct ipmi_smi_msg *msg)
2796 struct cmd_rcvr *rcvr;
2798 unsigned char netfn;
2801 ipmi_user_t user = NULL;
2802 struct ipmi_ipmb_addr *ipmb_addr;
2803 struct ipmi_recv_msg *recv_msg;
2804 unsigned long flags;
2805 struct ipmi_smi_handlers *handlers;
2807 if (msg->rsp_size < 10) {
2808 /* Message not big enough, just ignore it. */
2809 spin_lock_irqsave(&intf->counter_lock, flags);
2810 intf->invalid_commands++;
2811 spin_unlock_irqrestore(&intf->counter_lock, flags);
2815 if (msg->rsp[2] != 0) {
2816 /* An error getting the response, just ignore it. */
2820 netfn = msg->rsp[4] >> 2;
2822 chan = msg->rsp[3] & 0xf;
/* Find who (if anyone) registered for this command on this channel,
   and take a reference on that user while we hold it. */
2825 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2828 kref_get(&user->refcount);
2834 /* We didn't find a user, deliver an error response. */
2835 spin_lock_irqsave(&intf->counter_lock, flags);
2836 intf->unhandled_commands++;
2837 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build a Send Message wrapping an IPMB response with completion
   code "invalid command", mirroring the requester's addressing. */
2839 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2840 msg->data[1] = IPMI_SEND_MSG_CMD;
2841 msg->data[2] = msg->rsp[3];
2842 msg->data[3] = msg->rsp[6];
2843 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2844 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2845 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2847 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2848 msg->data[8] = msg->rsp[8]; /* cmd */
2849 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2850 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2851 msg->data_size = 11;
/* Debug dump of the synthesized error response (likely guarded by
   a debug #ifdef in the elided lines). */
2856 printk("Invalid command:");
2857 for (m = 0; m < msg->data_size; m++)
2858 printk(" %2.2x", msg->data[m]);
2863 handlers = intf->handlers;
2865 handlers->sender(intf->send_info, msg, 0);
2866 /* We used the message, so return the value
2867 that causes it to not be freed or
2873 /* Deliver the message to the user. */
2874 spin_lock_irqsave(&intf->counter_lock, flags);
2875 intf->handled_commands++;
2876 spin_unlock_irqrestore(&intf->counter_lock, flags);
2878 recv_msg = ipmi_alloc_recv_msg();
2880 /* We couldn't allocate memory for the
2881 message, so requeue it for handling
/* Allocation failed: drop the user reference taken above. */
2884 kref_put(&user->refcount, free_user);
2886 /* Extract the source address from the data. */
2887 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2888 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2889 ipmb_addr->slave_addr = msg->rsp[6];
2890 ipmb_addr->lun = msg->rsp[7] & 3;
2891 ipmb_addr->channel = msg->rsp[3] & 0xf;
2893 /* Extract the rest of the message information
2894 from the IPMB header.*/
2895 recv_msg->user = user;
2896 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2897 recv_msg->msgid = msg->rsp[7] >> 2;
2898 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2899 recv_msg->msg.cmd = msg->rsp[8];
2900 recv_msg->msg.data = recv_msg->msg_data;
2902 /* We chop off 10, not 9 bytes because the checksum
2903 at the end also needs to be removed. */
2904 recv_msg->msg.data_len = msg->rsp_size - 10;
2905 memcpy(recv_msg->msg_data,
2907 msg->rsp_size - 10);
2908 deliver_response(recv_msg);
/*
 * Handle a response received over a LAN channel.  Same shape as
 * handle_ipmb_get_msg_rsp(), but LAN responses have a longer header
 * (13-byte minimum including completion code) and the address carries a
 * session handle, remote/local software IDs and a privilege level.
 * NOTE(review): elided lines include the early returns and some
 * intf_find_seq() arguments.
 */
2915 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2916 struct ipmi_smi_msg *msg)
2918 struct ipmi_lan_addr lan_addr;
2919 struct ipmi_recv_msg *recv_msg;
2920 unsigned long flags;
2923 /* This is 13, not 12, because the response must contain a
2924 * completion code. */
2925 if (msg->rsp_size < 13) {
2926 /* Message not big enough, just ignore it. */
2927 spin_lock_irqsave(&intf->counter_lock, flags);
2928 intf->invalid_lan_responses++;
2929 spin_unlock_irqrestore(&intf->counter_lock, flags);
2933 if (msg->rsp[2] != 0) {
2934 /* An error getting the response, just ignore it. */
/* Rebuild the LAN source address from the response fields. */
2938 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2939 lan_addr.session_handle = msg->rsp[4];
2940 lan_addr.remote_SWID = msg->rsp[8];
2941 lan_addr.local_SWID = msg->rsp[5];
2942 lan_addr.channel = msg->rsp[3] & 0x0f;
2943 lan_addr.privilege = msg->rsp[3] >> 4;
2944 lan_addr.lun = msg->rsp[9] & 3;
2946 /* It's a response from a remote entity. Look up the sequence
2947 number and handle the response. */
2948 if (intf_find_seq(intf,
/* Clear the low netfn bit so request/response netfns compare equal. */
2952 (msg->rsp[6] >> 2) & (~1),
2953 (struct ipmi_addr *) &(lan_addr),
2956 /* We were unable to find the sequence number,
2957 so just nuke the message. */
2958 spin_lock_irqsave(&intf->counter_lock, flags);
2959 intf->unhandled_lan_responses++;
2960 spin_unlock_irqrestore(&intf->counter_lock, flags);
2964 memcpy(recv_msg->msg_data,
2966 msg->rsp_size - 11);
2967 /* The other fields matched, so no need to set them, except
2968 for netfn, which needs to be the response that was
2969 returned, not the request value. */
2970 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2971 recv_msg->msg.data = recv_msg->msg_data;
2972 recv_msg->msg.data_len = msg->rsp_size - 12;
2973 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2974 spin_lock_irqsave(&intf->counter_lock, flags);
2975 intf->handled_lan_responses++;
2976 spin_unlock_irqrestore(&intf->counter_lock, flags);
2977 deliver_response(recv_msg);
/*
 * Handle an incoming command addressed to us over a LAN channel.
 * Mirrors handle_ipmb_get_msg_cmd(), except that when no user has
 * registered for the command the message is simply dropped (rv = 0)
 * rather than answered with an error response.
 * NOTE(review): elided lines include early returns, the rcu lock
 * around find_cmd_rcvr, and the cmd assignment.
 */
2982 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2983 struct ipmi_smi_msg *msg)
2985 struct cmd_rcvr *rcvr;
2987 unsigned char netfn;
2990 ipmi_user_t user = NULL;
2991 struct ipmi_lan_addr *lan_addr;
2992 struct ipmi_recv_msg *recv_msg;
2993 unsigned long flags;
2995 if (msg->rsp_size < 12) {
2996 /* Message not big enough, just ignore it. */
2997 spin_lock_irqsave(&intf->counter_lock, flags);
2998 intf->invalid_commands++;
2999 spin_unlock_irqrestore(&intf->counter_lock, flags);
3003 if (msg->rsp[2] != 0) {
3004 /* An error getting the response, just ignore it. */
3008 netfn = msg->rsp[6] >> 2;
3010 chan = msg->rsp[3] & 0xf;
/* Find the registered receiver and pin the owning user. */
3013 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3016 kref_get(&user->refcount);
3022 /* We didn't find a user, just give up. */
3023 spin_lock_irqsave(&intf->counter_lock, flags);
3024 intf->unhandled_commands++;
3025 spin_unlock_irqrestore(&intf->counter_lock, flags);
3027 rv = 0; /* Don't do anything with these messages, just
3028 allow them to be freed. */
3030 /* Deliver the message to the user. */
3031 spin_lock_irqsave(&intf->counter_lock, flags);
3032 intf->handled_commands++;
3033 spin_unlock_irqrestore(&intf->counter_lock, flags);
3035 recv_msg = ipmi_alloc_recv_msg();
3037 /* We couldn't allocate memory for the
3038 message, so requeue it for handling
/* Allocation failed: drop the user reference taken above. */
3041 kref_put(&user->refcount, free_user);
3043 /* Extract the source address from the data. */
3044 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3045 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3046 lan_addr->session_handle = msg->rsp[4];
3047 lan_addr->remote_SWID = msg->rsp[8];
3048 lan_addr->local_SWID = msg->rsp[5];
3049 lan_addr->lun = msg->rsp[9] & 3;
3050 lan_addr->channel = msg->rsp[3] & 0xf;
3051 lan_addr->privilege = msg->rsp[3] >> 4;
3053 /* Extract the rest of the message information
3054 from the IPMB header.*/
3055 recv_msg->user = user;
3056 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3057 recv_msg->msgid = msg->rsp[9] >> 2;
3058 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3059 recv_msg->msg.cmd = msg->rsp[10];
3060 recv_msg->msg.data = recv_msg->msg_data;
3062 /* We chop off 12, not 11 bytes because the checksum
3063 at the end also needs to be removed. */
3064 recv_msg->msg.data_len = msg->rsp_size - 12;
3065 memcpy(recv_msg->msg_data,
3067 msg->rsp_size - 12);
3068 deliver_response(recv_msg);
/*
 * Populate a receive message as an asynchronous event coming from the
 * local BMC (system interface address, IPMI_BMC_CHANNEL).  The event
 * payload is the SMI response minus the 3-byte header (netfn/lun, cmd,
 * completion code); netfn and cmd are taken from the header bytes.
 */
3075 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3076 struct ipmi_smi_msg *msg)
3078 struct ipmi_system_interface_addr *smi_addr;
3080 recv_msg->msgid = 0;
3081 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3082 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3083 smi_addr->channel = IPMI_BMC_CHANNEL;
3084 smi_addr->lun = msg->rsp[0] & 3;
3085 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3086 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3087 recv_msg->msg.cmd = msg->rsp[1];
3088 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3089 recv_msg->msg.data = recv_msg->msg_data;
3090 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle the response to a Read Event Message Buffer command, i.e. an
 * asynchronous event from the BMC.  A copy of the event is queued for
 * every user with gets_events set; if no one wants it, the event is
 * parked on intf->waiting_events (bounded by MAX_EVENTS_IN_QUEUE) so a
 * future ipmi_set_gets_events() caller can retrieve it.  On allocation
 * failure the partially built list is torn down and the SMI message is
 * requeued (the elided lines set the requeue return value).
 */
3093 static int handle_read_event_rsp(ipmi_smi_t intf,
3094 struct ipmi_smi_msg *msg)
3096 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3097 struct list_head msgs;
3100 int deliver_count = 0;
3101 unsigned long flags;
3103 if (msg->rsp_size < 19) {
3104 /* Message is too small to be an IPMB event. */
3105 spin_lock_irqsave(&intf->counter_lock, flags);
3106 intf->invalid_events++;
3107 spin_unlock_irqrestore(&intf->counter_lock, flags);
3111 if (msg->rsp[2] != 0) {
3112 /* An error getting the event, just ignore it. */
3116 INIT_LIST_HEAD(&msgs);
/* events_lock serializes event delivery and the waiting-events queue. */
3118 spin_lock_irqsave(&intf->events_lock, flags);
3120 spin_lock(&intf->counter_lock);
3122 spin_unlock(&intf->counter_lock);
3124 /* Allocate and fill in one message for every user that is getting
3127 list_for_each_entry_rcu(user, &intf->users, link) {
3128 if (!user->gets_events)
3131 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed: free everything built so far and requeue. */
3134 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3136 list_del(&recv_msg->link);
3137 ipmi_free_recv_msg(recv_msg);
3139 /* We couldn't allocate memory for the
3140 message, so requeue it for handling
3148 copy_event_into_recv_msg(recv_msg, msg);
3149 recv_msg->user = user;
3150 kref_get(&user->refcount);
3151 list_add_tail(&(recv_msg->link), &msgs);
3155 if (deliver_count) {
3156 /* Now deliver all the messages. */
3157 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3158 list_del(&recv_msg->link);
3159 deliver_response(recv_msg);
3161 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3162 /* No one to receive the message, put it in queue if there's
3163 not already too many things in the queue. */
3164 recv_msg = ipmi_alloc_recv_msg();
3166 /* We couldn't allocate memory for the
3167 message, so requeue it for handling
3173 copy_event_into_recv_msg(recv_msg, msg);
3174 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3175 intf->waiting_events_count++;
3177 /* There's too many things in the queue, discard this
3179 printk(KERN_WARNING PFX "Event queue full, discarding an"
3180 " incoming event\n");
3184 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC (system interface).  The
 * originating request is recovered from msg->user_data; if there is
 * none, the message is malformed (or a hardware fault) and is logged
 * and dropped.  If the requesting user has since gone invalid, the
 * receive message is freed and the unhandled counter bumped; otherwise
 * the response fields are filled in (system-interface address, netfn,
 * cmd, payload minus the 2-byte header) and delivered.
 *
 * Fix: corrected the user-visible log message typo "vender" ->
 * "vendor".
 */
3189 static int handle_bmc_rsp(ipmi_smi_t intf,
3190 struct ipmi_smi_msg *msg)
3192 struct ipmi_recv_msg *recv_msg;
3193 unsigned long flags;
3194 struct ipmi_user *user;
3196 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3197 if (recv_msg == NULL)
3199 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3200 "could be because of a malformed message, or\n"
3201 "because of a hardware error. Contact your\n"
3202 "hardware vendor for assistance\n");
3206 user = recv_msg->user;
3207 /* Make sure the user still exists. */
3208 if (user && !user->valid) {
3209 /* The user for the message went away, so give up. */
3210 spin_lock_irqsave(&intf->counter_lock, flags);
3211 intf->unhandled_local_responses++;
3212 spin_unlock_irqrestore(&intf->counter_lock, flags);
3213 ipmi_free_recv_msg(recv_msg);
3215 struct ipmi_system_interface_addr *smi_addr;
3217 spin_lock_irqsave(&intf->counter_lock, flags);
3218 intf->handled_local_responses++;
3219 spin_unlock_irqrestore(&intf->counter_lock, flags);
3220 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3221 recv_msg->msgid = msg->msgid;
3222 smi_addr = ((struct ipmi_system_interface_addr *)
3224 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3225 smi_addr->channel = IPMI_BMC_CHANNEL;
3226 smi_addr->lun = msg->rsp[0] & 3;
3227 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3228 recv_msg->msg.cmd = msg->rsp[1];
/* Skip the 2-byte netfn/cmd header when copying the payload. */
3229 memcpy(recv_msg->msg_data,
3232 recv_msg->msg.data = recv_msg->msg_data;
3233 recv_msg->msg.data_len = msg->rsp_size - 2;
3234 deliver_response(recv_msg);
3240 /* Handle a new message. Return 1 if the message should be requeued,
3241 0 if the message should be freed, or -1 if the message should not
3242 be freed or requeued. */
/*
 * Central dispatcher for messages coming up from the SMI layer.
 * First sanity-checks the response against the original request
 * (size, netfn, cmd), synthesizing an IPMI_ERR_UNSPECIFIED response
 * in place if the BMC's reply is unusable.  Then routes by content:
 *   - response to a Send Message we issued for a user response
 *     -> deliver a send-message confirmation to that user;
 *   - Get Message response -> per-channel-medium IPMB/LAN
 *     response/command handlers;
 *   - Read Event Message Buffer response -> async event handling;
 *   - anything else -> a response from the local BMC.
 *
 * Fix: corrected the log message grammar "returned to small a
 * message" -> "returned too small a message".
 */
3243 static int handle_new_recv_msg(ipmi_smi_t intf,
3244 struct ipmi_smi_msg *msg)
/* Debug dump of the raw response bytes (guard lines elided). */
3252 for (m = 0; m < msg->rsp_size; m++)
3253 printk(" %2.2x", msg->rsp[m]);
3256 if (msg->rsp_size < 2) {
3257 /* Message is too small to be correct. */
3258 printk(KERN_WARNING PFX "BMC returned too small a message"
3259 " for netfn %x cmd %x, got %d bytes\n",
3260 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3262 /* Generate an error response for the message. */
3263 msg->rsp[0] = msg->data[0] | (1 << 2);
3264 msg->rsp[1] = msg->data[1];
3265 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3267 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3268 || (msg->rsp[1] != msg->data[1])) /* Command */
3270 /* The response is not even marginally correct. */
3271 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3272 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3273 (msg->data[0] >> 2) | 1, msg->data[1],
3274 msg->rsp[0] >> 2, msg->rsp[1]);
3276 /* Generate an error response for the message. */
3277 msg->rsp[0] = msg->data[0] | (1 << 2);
3278 msg->rsp[1] = msg->data[1];
3279 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3283 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3284 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3285 && (msg->user_data != NULL))
3287 /* It's a response to a response we sent. For this we
3288 deliver a send message response to the user. */
3289 struct ipmi_recv_msg *recv_msg = msg->user_data;
3292 if (msg->rsp_size < 2)
3293 /* Message is too small to be correct. */
3296 chan = msg->data[2] & 0x0f;
3297 if (chan >= IPMI_MAX_CHANNELS)
3298 /* Invalid channel number */
3304 /* Make sure the user still exists. */
3305 if (!recv_msg->user || !recv_msg->user->valid)
3308 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3309 recv_msg->msg.data = recv_msg->msg_data;
/* The only payload is the completion code of the send. */
3310 recv_msg->msg.data_len = 1;
3311 recv_msg->msg_data[0] = msg->rsp[2];
3312 deliver_response(recv_msg);
3313 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3314 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3316 /* It's from the receive queue. */
3317 chan = msg->rsp[3] & 0xf;
3318 if (chan >= IPMI_MAX_CHANNELS) {
3319 /* Invalid channel number */
/* Route by channel medium; bit 2 of the netfn byte distinguishes
   a response (set) from a command (clear). */
3324 switch (intf->channels[chan].medium) {
3325 case IPMI_CHANNEL_MEDIUM_IPMB:
3326 if (msg->rsp[4] & 0x04) {
3327 /* It's a response, so find the
3328 requesting message and send it up. */
3329 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3331 /* It's a command to the SMS from some other
3332 entity. Handle that. */
3333 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3337 case IPMI_CHANNEL_MEDIUM_8023LAN:
3338 case IPMI_CHANNEL_MEDIUM_ASYNC:
3339 if (msg->rsp[6] & 0x04) {
3340 /* It's a response, so find the
3341 requesting message and send it up. */
3342 requeue = handle_lan_get_msg_rsp(intf, msg);
3344 /* It's a command to the SMS from some other
3345 entity. Handle that. */
3346 requeue = handle_lan_get_msg_cmd(intf, msg);
3351 /* We don't handle the channel type, so just
3352 * free the message. */
3356 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3357 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3359 /* It's an asyncronous event. */
3360 requeue = handle_read_event_rsp(intf, msg);
3362 /* It's a response from the local BMC. */
3363 requeue = handle_bmc_rsp(intf, msg);
3370 /* Handle a new message from the lower layer. */
/*
 * Entry point called by SMI drivers when a message arrives from the
 * hardware.  Local Send Message responses (command sends we issued
 * ourselves, user_data == NULL) are consumed here: a hard error stops
 * the pending sequence immediately, while retryable bus errors (or
 * success) just (re)start the sequence timer; either way the SMI
 * message is freed.  Everything else is passed to
 * handle_new_recv_msg(), with intf->waiting_msgs used to preserve
 * ordering when a message cannot be processed right away.
 */
3371 void ipmi_smi_msg_received(ipmi_smi_t intf,
3372 struct ipmi_smi_msg *msg)
3374 unsigned long flags;
3378 if ((msg->data_size >= 2)
3379 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3380 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3381 && (msg->user_data == NULL))
3383 /* This is the local response to a command send, start
3384 the timer for these. The user_data will not be
3385 NULL if this is a response send, and we will let
3386 response sends just go through. */
3388 /* Check for errors, if we get certain errors (ones
3389 that mean basically we can try again later), we
3390 ignore them and start the timer. Otherwise we
3391 report the error immediately. */
3392 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3393 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3394 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3395 && (msg->rsp[2] != IPMI_BUS_ERR)
3396 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3398 int chan = msg->rsp[3] & 0xf;
3400 /* Got an error sending the message, handle it. */
3401 spin_lock_irqsave(&intf->counter_lock, flags);
3402 if (chan >= IPMI_MAX_CHANNELS)
3403 ; /* This shouldn't happen */
3404 else if ((intf->channels[chan].medium
3405 == IPMI_CHANNEL_MEDIUM_8023LAN)
3406 || (intf->channels[chan].medium
3407 == IPMI_CHANNEL_MEDIUM_ASYNC))
3408 intf->sent_lan_command_errs++;
3410 intf->sent_ipmb_command_errs++;
3411 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Report the hard failure to the waiting sequence entry. */
3412 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3414 /* The message was sent, start the timer. */
3415 intf_start_seq_timer(intf, msg->msgid);
3418 ipmi_free_smi_msg(msg);
3422 /* To preserve message order, if the list is not empty, we
3423 tack this message onto the end of the list. */
3424 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3425 if (!list_empty(&intf->waiting_msgs)) {
3426 list_add_tail(&msg->link, &intf->waiting_msgs);
3427 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3430 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3432 rv = handle_new_recv_msg(intf, msg);
3434 /* Could not handle the message now, just add it to a
3435 list to handle later. */
3436 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3437 list_add_tail(&msg->link, &intf->waiting_msgs);
3438 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3439 } else if (rv == 0) {
3440 ipmi_free_smi_msg(msg);
/*
 * Notify every user of this interface that a watchdog pretimeout has
 * occurred, via their registered ipmi_watchdog_pretimeout callback.
 * Users without that callback are skipped (the `continue` is in an
 * elided line).  Iteration is RCU-protected.
 */
3447 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3452 list_for_each_entry_rcu(user, &intf->users, link) {
3453 if (!user->handler->ipmi_watchdog_pretimeout)
3456 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * Build a fresh SMI message from a pending receive message so it can be
 * retransmitted: copies the original request data and encodes the
 * sequence slot/id into msgid.  Returns NULL on allocation failure,
 * which is tolerated because the retry logic will try again later.
 */
3462 static struct ipmi_smi_msg *
3463 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3464 unsigned char seq, long seqid)
3466 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3468 /* If we can't allocate the message, then just return, we
3469 get 4 retries, so this should be ok. */
3472 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3473 smi_msg->data_size = recv_msg->msg.data_len;
3474 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
/* Debug dump of the resend data (guard lines elided). */
3480 for (m = 0; m < smi_msg->data_size; m++)
3481 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period.  If the entry has
 * expired with no retries left, move its receive message onto the
 * caller's timeouts list (the caller delivers the error responses).
 * Otherwise resend it: the sequence lock is dropped around the send
 * because the sender may block, then reacquired.  Counters distinguish
 * LAN vs IPMB timeouts/retransmits.  Does nothing if the interface is
 * being torn down (intf_num == -1) or the entry is inactive/unexpired
 * (some guards are in elided lines).
 */
3488 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3489 struct list_head *timeouts, long timeout_period,
3490 int slot, unsigned long *flags)
3492 struct ipmi_recv_msg *msg;
3493 struct ipmi_smi_handlers *handlers;
3495 if (intf->intf_num == -1)
3501 ent->timeout -= timeout_period;
3502 if (ent->timeout > 0)
3505 if (ent->retries_left == 0) {
3506 /* The message has used all its retries. */
3508 msg = ent->recv_msg;
3509 list_add_tail(&msg->link, timeouts);
3510 spin_lock(&intf->counter_lock);
3512 intf->timed_out_ipmb_broadcasts++;
3513 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3514 intf->timed_out_lan_commands++;
3516 intf->timed_out_ipmb_commands++;
3517 spin_unlock(&intf->counter_lock);
3519 struct ipmi_smi_msg *smi_msg;
3520 /* More retries, send again. */
3522 /* Start with the max timer, set to normal
3523 timer after the message is sent. */
3524 ent->timeout = MAX_MSG_TIMEOUT;
3525 ent->retries_left--;
3526 spin_lock(&intf->counter_lock);
3527 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3528 intf->retransmitted_lan_commands++;
3530 intf->retransmitted_ipmb_commands++;
3531 spin_unlock(&intf->counter_lock);
3533 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop seq_lock across the send; the handler may sleep or re-enter. */
3538 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3540 /* Send the new message. We send with a zero
3541 * priority. It timed out, I doubt time is
3542 * that critical now, and high priority
3543 * messages are really only for messages to the
3544 * local MC, which don't get resent. */
3545 handlers = intf->handlers;
3547 intf->handlers->sender(intf->send_info,
3550 ipmi_free_smi_msg(smi_msg);
3552 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic work run from the IPMI timer: for every interface, first
 * drain as much of the deferred waiting_msgs queue as possible
 * (stopping at the first message that still can't be handled, to keep
 * ordering), then age every sequence-table entry via
 * check_msg_timeout() and deliver timeout error responses for the
 * expired ones.  timeout_period is the elapsed time to charge against
 * each entry.
 */
3556 static void ipmi_timeout_handler(long timeout_period)
3559 struct list_head timeouts;
3560 struct ipmi_recv_msg *msg, *msg2;
3561 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3562 unsigned long flags;
3565 INIT_LIST_HEAD(&timeouts);
3568 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3569 /* See if any waiting messages need to be processed. */
3570 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3571 list_for_each_entry_safe(smi_msg, smi_msg2,
3572 &intf->waiting_msgs, link) {
3573 if (!handle_new_recv_msg(intf, smi_msg)) {
3574 list_del(&smi_msg->link);
3575 ipmi_free_smi_msg(smi_msg);
3577 /* To preserve message order, quit if we
3578 can't handle a message. */
3582 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3584 /* Go through the seq table and find any messages that
3585 have timed out, putting them in the timeouts
3587 spin_lock_irqsave(&intf->seq_lock, flags);
3588 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3589 check_msg_timeout(intf, &(intf->seq_table[i]),
3590 &timeouts, timeout_period, i,
3592 spin_unlock_irqrestore(&intf->seq_lock, flags);
3594 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3595 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
/*
 * Ask every interface's SMI driver to poll the BMC for queued events.
 * Invoked periodically from the timer (see ipmi_timeout); the handlers
 * pointer is safe to use here per the comment below.
 */
3600 static void ipmi_request_event(void)
3603 struct ipmi_smi_handlers *handlers;
3606 /* Called from the timer, no need to check if handlers is
3608 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3609 handlers = intf->handlers;
3611 handlers->request_events(intf->send_info);
/* Periodic timer driving timeout handling and event polling. */
3616 static struct timer_list ipmi_timer;
3618 /* Call every ~100 ms. */
3619 #define IPMI_TIMEOUT_TIME 100
3621 /* How many jiffies does it take to get to the timeout time. */
3622 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3624 /* Request events from the queue every second (this is the number of
3625 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3626 future, IPMI will add a way to know immediately if an event is in
3627 the queue and this silliness can go away. */
3628 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* Set nonzero at module exit to stop the timer from rearming. */
3630 static atomic_t stop_operation;
/* Countdown of timer ticks until the next event-request poll. */
3631 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback.  Bails out without rearming once stop_operation is
 * set (module unload).  Every IPMI_REQUEST_EV_TIME ticks it polls for
 * events, then runs the timeout handler and rearms itself for another
 * IPMI_TIMEOUT_JIFFIES.
 */
3633 static void ipmi_timeout(unsigned long data)
3635 if (atomic_read(&stop_operation))
3639 if (ticks_to_req_ev == 0) {
3640 ipmi_request_event();
3641 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3644 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3646 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Outstanding-allocation counters; checked at module exit to report
   leaked SMI/receive message buffers. */
3650 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3651 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3653 /* FIXME - convert these to slabs. */
/* Destructor installed as the SMI message's done handler; decrements
   the leak-tracking counter (the kfree is in an elided line). */
3654 static void free_smi_msg(struct ipmi_smi_msg *msg)
3656 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an SMI message (GFP_ATOMIC: callers may be in interrupt
 * context).  On success, installs the free_smi_msg destructor, clears
 * user_data and bumps the in-use counter.  Returns NULL on allocation
 * failure (the NULL check and return are in elided lines).
 */
3660 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3662 struct ipmi_smi_msg *rv;
3663 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3665 rv->done = free_smi_msg;
3666 rv->user_data = NULL;
3667 atomic_inc(&smi_msg_inuse_count);
/* Destructor installed as the receive message's done handler;
   decrements the leak-tracking counter (the kfree is elided). */
3672 static void free_recv_msg(struct ipmi_recv_msg *msg)
3674 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a receive message (GFP_ATOMIC) with the free_recv_msg
 * destructor installed and the in-use counter bumped.  Returns NULL on
 * allocation failure (NULL check and return are in elided lines).
 */
3678 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3680 struct ipmi_recv_msg *rv;
3682 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3685 rv->done = free_recv_msg;
3686 atomic_inc(&recv_msg_inuse_count);
/*
 * Free a receive message, dropping the reference it holds on its owning
 * user (the guard for msg->user and the msg->done() call are in elided
 * lines).
 */
3691 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3694 kref_put(&msg->user->refcount, free_user);
3698 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done handlers for the statically allocated messages used on
   the panic path, where nothing may be freed. */
3700 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3704 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3708 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic-event setup: when the response to
 * our Get Event Receiver request arrives, cache the receiver's IPMB
 * address and LUN on the interface.
 */
3709 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3711 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3712 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3713 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3714 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3716 /* A get event receiver command, save it. */
3717 intf->event_receiver = msg->msg.data[1];
3718 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic-event setup: when the response to
 * our Get Device ID request arrives, record whether the local MC is an
 * SEL device and/or an event generator (capability bits in data[6]).
 */
3722 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3724 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3725 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3726 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3727 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3729 /* A get device id command, save if we are an event
3730 receiver or generator. */
3731 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3732 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * Emit panic information over IPMI.  Runs from the panic notifier, so
 * everything is stack-/statically-allocated, requests are sent with
 * no retry and no wait, and the done handlers are no-ops.
 *
 * Phase 1: send an "OS Critical Stop" platform event to every ready
 * interface.  Phase 2 (CONFIG_IPMI_PANIC_STRING): chop the panic
 * string into 11-byte chunks and store them as OEM SEL entries,
 * either on a remote event receiver (found via Get Device ID /
 * Get Event Receiver) or in the local SEL.
 */
3737 static void send_panic_events(char *str)
3739 struct kernel_ipmi_msg msg;
3741 unsigned char data[16];
3742 struct ipmi_system_interface_addr *si;
3743 struct ipmi_addr addr;
3744 struct ipmi_smi_msg smi_msg;
3745 struct ipmi_recv_msg recv_msg;
3747 si = (struct ipmi_system_interface_addr *) &addr;
3748 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3749 si->channel = IPMI_BMC_CHANNEL;
3752 /* Fill in an event telling that we have failed. */
3753 msg.netfn = 0x04; /* Sensor or Event. */
3754 msg.cmd = 2; /* Platform event command. */
3757 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3758 data[1] = 0x03; /* This is for IPMI 1.0. */
3759 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3760 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3761 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3763 /* Put a few breadcrumbs in. Hopefully later we can add more things
3764 to make the panic events more useful. */
/* Static messages: done handlers must not free anything at panic. */
3771 smi_msg.done = dummy_smi_done_handler;
3772 recv_msg.done = dummy_recv_done_handler;
3774 /* For every registered interface, send the event. */
3775 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3776 if (!intf->handlers)
3777 /* Interface is not ready. */
3780 /* Send the event announcing the panic. */
/* Force synchronous, polled operation from here on. */
3781 intf->handlers->set_run_to_completion(intf->send_info, 1);
3782 i_ipmi_request(NULL,
3791 intf->channels[0].address,
3792 intf->channels[0].lun,
3793 0, 1); /* Don't retry, and don't wait. */
3796 #ifdef CONFIG_IPMI_PANIC_STRING
3797 /* On every interface, dump a bunch of OEM event holding the
3802 /* For every registered interface, send the event. */
3803 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3805 struct ipmi_ipmb_addr *ipmb;
3808 if (intf->intf_num == -1)
3809 /* Interface was not ready yet. */
3812 /* First job here is to figure out where to send the
3813 OEM events. There's no way in IPMI to send OEM
3814 events using an event send command, so we have to
3815 find the SEL to put them in and stick them in
3818 /* Get capabilities from the get device id. */
3819 intf->local_sel_device = 0;
3820 intf->local_event_generator = 0;
3821 intf->event_receiver = 0;
3823 /* Request the device info from the local MC. */
3824 msg.netfn = IPMI_NETFN_APP_REQUEST;
3825 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* Responses are intercepted by the null_user_handler below. */
3828 intf->null_user_handler = device_id_fetcher;
3829 i_ipmi_request(NULL,
3838 intf->channels[0].address,
3839 intf->channels[0].lun,
3840 0, 1); /* Don't retry, and don't wait. */
3842 if (intf->local_event_generator) {
3843 /* Request the event receiver from the local MC. */
3844 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3845 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3848 intf->null_user_handler = event_receiver_fetcher;
3849 i_ipmi_request(NULL,
3858 intf->channels[0].address,
3859 intf->channels[0].lun,
3860 0, 1); /* no retry, and no wait. */
3862 intf->null_user_handler = NULL;
3864 /* Validate the event receiver. The low bit must not
3865 be 1 (it must be a valid IPMB address), it cannot
3866 be zero, and it must not be my address. */
3867 if (((intf->event_receiver & 1) == 0)
3868 && (intf->event_receiver != 0)
3869 && (intf->event_receiver != intf->channels[0].address))
3871 /* The event receiver is valid, send an IPMB
3873 ipmb = (struct ipmi_ipmb_addr *) &addr;
3874 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3875 ipmb->channel = 0; /* FIXME - is this right? */
3876 ipmb->lun = intf->event_receiver_lun;
3877 ipmb->slave_addr = intf->event_receiver;
3878 } else if (intf->local_sel_device) {
3879 /* The event receiver was not valid (or was
3880 me), but I am an SEL device, just dump it
3882 si = (struct ipmi_system_interface_addr *) &addr;
3883 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3884 si->channel = IPMI_BMC_CHANNEL;
3887 continue; /* No where to send the event. */
3890 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3891 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
/* Walk the panic string, 11 bytes per SEL record (loop header
   is in an elided line). */
3897 int size = strlen(p);
3903 data[2] = 0xf0; /* OEM event without timestamp. */
3904 data[3] = intf->channels[0].address;
3905 data[4] = j++; /* sequence # */
3906 /* Always give 11 bytes, so strncpy will fill
3907 it with zeroes for me. */
3908 strncpy(data+5, p, 11);
3911 i_ipmi_request(NULL,
3920 intf->channels[0].address,
3921 intf->channels[0].lun,
3922 0, 1); /* no retry, and no wait. */
3925 #endif /* CONFIG_IPMI_PANIC_STRING */
/* Guards against re-entering the panic path more than once (the check
   that reads/sets this is in elided lines of panic_event). */
3929 static int has_panicked = 0;
/*
 * Panic-notifier callback: switch every ready interface to
 * run-to-completion (polled) mode, then optionally send the panic
 * events with the panic string from ptr.
 */
3931 static int panic_event(struct notifier_block *this,
3932 unsigned long event,
3941 /* For every registered interface, set it to run to completion. */
3942 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3943 if (!intf->handlers)
3944 /* Interface is not ready. */
3947 intf->handlers->set_run_to_completion(intf->send_info, 1);
3950 #ifdef CONFIG_IPMI_PANIC_EVENT
3951 send_panic_events(ptr);
/* Registration record for the kernel panic notifier chain. */
3957 static struct notifier_block panic_block = {
3958 .notifier_call = panic_event,
3960 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time initialization of the message handler: registers the IPMI
 * driver, creates the /proc/ipmi directory, starts the periodic timer,
 * and hooks the panic notifier.  Elided lines include the
 * `initialized` guard, the error cleanup, and the success return.
 */
3963 static int ipmi_init_msghandler(void)
3970 rv = driver_register(&ipmidriver);
3972 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3976 printk(KERN_INFO "ipmi message handler version "
3977 IPMI_DRIVER_VERSION "\n");
3979 #ifdef CONFIG_PROC_FS
3980 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3981 if (!proc_ipmi_root) {
3982 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3986 proc_ipmi_root->owner = THIS_MODULE;
3987 #endif /* CONFIG_PROC_FS */
3989 setup_timer(&ipmi_timer, ipmi_timeout, 0);
3990 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3992 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module init entry point; just defers to ipmi_init_msghandler(). */
3999 static __init int ipmi_init_msghandler_mod(void)
4001 ipmi_init_msghandler();
/*
 * Module exit: unhook the panic notifier, stop the periodic timer
 * (stop_operation prevents rearming before del_timer_sync), remove the
 * /proc entry, unregister the driver, and report any leaked message
 * buffers via the in-use counters.  Interfaces must already be gone.
 */
4005 static __exit void cleanup_ipmi(void)
4012 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4014 /* This can't be called if any interfaces exist, so no worry about
4015 shutting down the interfaces. */
4017 /* Tell the timer to stop, then wait for it to stop. This avoids
4018 problems with race conditions removing the timer here. */
4019 atomic_inc(&stop_operation);
4020 del_timer_sync(&ipmi_timer);
4022 #ifdef CONFIG_PROC_FS
4023 remove_proc_entry(proc_ipmi_root->name, &proc_root);
4024 #endif /* CONFIG_PROC_FS */
4026 driver_unregister(&ipmidriver);
4030 /* Check for buffer leaks. */
4031 count = atomic_read(&smi_msg_inuse_count);
4033 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4035 count = atomic_read(&recv_msg_inuse_count);
4037 printk(KERN_WARNING PFX "recv message count %d at exit\n",
4042 module_init(ipmi_init_msghandler_mod);
4043 MODULE_LICENSE("GPL");
4044 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4045 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4046 MODULE_VERSION(IPMI_DRIVER_VERSION);
4048 EXPORT_SYMBOL(ipmi_create_user);
4049 EXPORT_SYMBOL(ipmi_destroy_user);
4050 EXPORT_SYMBOL(ipmi_get_version);
4051 EXPORT_SYMBOL(ipmi_request_settime);
4052 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4053 EXPORT_SYMBOL(ipmi_register_smi);
4054 EXPORT_SYMBOL(ipmi_unregister_smi);
4055 EXPORT_SYMBOL(ipmi_register_for_cmd);
4056 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4057 EXPORT_SYMBOL(ipmi_smi_msg_received);
4058 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4059 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4060 EXPORT_SYMBOL(ipmi_addr_length);
4061 EXPORT_SYMBOL(ipmi_validate_addr);
4062 EXPORT_SYMBOL(ipmi_set_gets_events);
4063 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4064 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4065 EXPORT_SYMBOL(ipmi_set_my_address);
4066 EXPORT_SYMBOL(ipmi_get_my_address);
4067 EXPORT_SYMBOL(ipmi_set_my_LUN);
4068 EXPORT_SYMBOL(ipmi_get_my_LUN);
4069 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4070 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
4071 EXPORT_SYMBOL(ipmi_free_recv_msg);