4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
48 #define PFX "IPMI message handler: "
50 #define IPMI_DRIVER_VERSION "39.2"
52 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53 static int ipmi_init_msghandler(void);
55 static int initialized;
58 static struct proc_dir_entry *proc_ipmi_root;
59 #endif /* CONFIG_PROC_FS */
61 /* Remain in auto-maintenance mode for this amount of time (in ms). */
62 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
64 #define MAX_EVENTS_IN_QUEUE 25
66 /* Don't let a message sit in a queue forever, always time it with at lest
67 the max message timer. This is in milliseconds. */
68 #define MAX_MSG_TIMEOUT 60000
71 * The main "user" data structure.
75 struct list_head link;
77 /* Set to "0" when the user is destroyed. */
82 /* The upper layer that handles receive messages. */
83 struct ipmi_user_hndl *handler;
86 /* The interface this user is bound to. */
89 /* Does this interface receive IPMI events? */
95 struct list_head link;
103 * This is used to form a linked lised during mass deletion.
104 * Since this is in an RCU list, we cannot use the link above
105 * or change any data until the RCU period completes. So we
106 * use this next variable during mass deletion so we can have
107 * a list and don't have to wait and restart the search on
108 * every individual deletion of a command. */
109 struct cmd_rcvr *next;
114 unsigned int inuse : 1;
115 unsigned int broadcast : 1;
117 unsigned long timeout;
118 unsigned long orig_timeout;
119 unsigned int retries_left;
121 /* To verify on an incoming send message response that this is
122 the message that the response is for, we keep a sequence id
123 and increment it every time we send a message. */
126 /* This is held so we can properly respond to the message on a
127 timeout, and it is used to hold the temporary data for
128 retransmission, too. */
129 struct ipmi_recv_msg *recv_msg;
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.  The sequence-table index
 * (seq) lives in bits 26-31 (6 bits, IPMI_IPMB_NUM_SEQ == 64) and
 * the per-slot sequence id (seqid) in bits 0-21 (22 bits).  All
 * three macros must agree on these field widths so that a stored
 * msgid always round-trips through GET_SEQ_FROM_MSGID().
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	(((((unsigned long) (seq)) & 0x3f) << 26) | ((seqid) & 0x3fffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3fffff);			\
	} while (0)

/* Advance a slot's sequence id, wrapping within the 22-bit field. */
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
146 unsigned char medium;
147 unsigned char protocol;
149 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
150 but may be changed by the user. */
151 unsigned char address;
153 /* My LUN. This should generally stay the SMS LUN, but just in
158 #ifdef CONFIG_PROC_FS
159 struct ipmi_proc_entry
162 struct ipmi_proc_entry *next;
168 struct platform_device *dev;
169 struct ipmi_device_id id;
170 unsigned char guid[16];
173 struct kref refcount;
175 /* bmc device attributes */
176 struct device_attribute device_id_attr;
177 struct device_attribute provides_dev_sdrs_attr;
178 struct device_attribute revision_attr;
179 struct device_attribute firmware_rev_attr;
180 struct device_attribute version_attr;
181 struct device_attribute add_dev_support_attr;
182 struct device_attribute manufacturer_id_attr;
183 struct device_attribute product_id_attr;
184 struct device_attribute guid_attr;
185 struct device_attribute aux_firmware_rev_attr;
189 * Various statistics for IPMI, these index stats[] in the ipmi_smi
192 /* Commands we got from the user that were invalid. */
193 #define IPMI_STAT_sent_invalid_commands 0
195 /* Commands we sent to the MC. */
196 #define IPMI_STAT_sent_local_commands 1
198 /* Responses from the MC that were delivered to a user. */
199 #define IPMI_STAT_handled_local_responses 2
201 /* Responses from the MC that were not delivered to a user. */
202 #define IPMI_STAT_unhandled_local_responses 3
204 /* Commands we sent out to the IPMB bus. */
205 #define IPMI_STAT_sent_ipmb_commands 4
207 /* Commands sent on the IPMB that had errors on the SEND CMD */
208 #define IPMI_STAT_sent_ipmb_command_errs 5
210 /* Each retransmit increments this count. */
211 #define IPMI_STAT_retransmitted_ipmb_commands 6
213 /* When a message times out (runs out of retransmits) this is incremented. */
214 #define IPMI_STAT_timed_out_ipmb_commands 7
217 * This is like above, but for broadcasts. Broadcasts are
218 * *not* included in the above count (they are expected to
221 #define IPMI_STAT_timed_out_ipmb_broadcasts 8
223 /* Responses I have sent to the IPMB bus. */
224 #define IPMI_STAT_sent_ipmb_responses 9
226 /* The response was delivered to the user. */
227 #define IPMI_STAT_handled_ipmb_responses 10
229 /* The response had invalid data in it. */
230 #define IPMI_STAT_invalid_ipmb_responses 11
232 /* The response didn't have anyone waiting for it. */
233 #define IPMI_STAT_unhandled_ipmb_responses 12
235 /* Commands we sent out to the IPMB bus. */
236 #define IPMI_STAT_sent_lan_commands 13
238 /* Commands sent on the IPMB that had errors on the SEND CMD */
239 #define IPMI_STAT_sent_lan_command_errs 14
241 /* Each retransmit increments this count. */
242 #define IPMI_STAT_retransmitted_lan_commands 15
244 /* When a message times out (runs out of retransmits) this is incremented. */
245 #define IPMI_STAT_timed_out_lan_commands 16
247 /* Responses I have sent to the IPMB bus. */
248 #define IPMI_STAT_sent_lan_responses 17
250 /* The response was delivered to the user. */
251 #define IPMI_STAT_handled_lan_responses 18
253 /* The response had invalid data in it. */
254 #define IPMI_STAT_invalid_lan_responses 19
256 /* The response didn't have anyone waiting for it. */
257 #define IPMI_STAT_unhandled_lan_responses 20
259 /* The command was delivered to the user. */
260 #define IPMI_STAT_handled_commands 21
262 /* The command had invalid data in it. */
263 #define IPMI_STAT_invalid_commands 22
265 /* The command didn't have anyone waiting for it. */
266 #define IPMI_STAT_unhandled_commands 23
268 /* Invalid data in an event. */
269 #define IPMI_STAT_invalid_events 24
271 /* Events that were received with the proper format. */
272 #define IPMI_STAT_events 25
274 /* When you add a statistic, you must update this value. */
275 #define IPMI_NUM_STATS 26
278 #define IPMI_IPMB_NUM_SEQ 64
279 #define IPMI_MAX_CHANNELS 16
282 /* What interface number are we? */
285 struct kref refcount;
287 /* Used for a list of interfaces. */
288 struct list_head link;
290 /* The list of upper layers that are using me. seq_lock
292 struct list_head users;
294 /* Information to supply to users. */
295 unsigned char ipmi_version_major;
296 unsigned char ipmi_version_minor;
298 /* Used for wake ups at startup. */
299 wait_queue_head_t waitq;
301 struct bmc_device *bmc;
305 /* This is the lower-layer's sender routine. Note that you
306 * must either be holding the ipmi_interfaces_mutex or be in
307 * an umpreemptible region to use this. You must fetch the
308 * value into a local variable and make sure it is not NULL. */
309 struct ipmi_smi_handlers *handlers;
312 #ifdef CONFIG_PROC_FS
313 /* A list of proc entries for this interface. */
314 struct mutex proc_entry_lock;
315 struct ipmi_proc_entry *proc_entries;
318 /* Driver-model device for the system interface. */
319 struct device *si_dev;
321 /* A table of sequence numbers for this interface. We use the
322 sequence numbers for IPMB messages that go out of the
323 interface to match them up with their responses. A routine
324 is called periodically to time the items in this list. */
326 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
329 /* Messages that were delayed for some reason (out of memory,
330 for instance), will go in here to be processed later in a
331 periodic timer interrupt. */
332 spinlock_t waiting_msgs_lock;
333 struct list_head waiting_msgs;
335 /* The list of command receivers that are registered for commands
336 on this interface. */
337 struct mutex cmd_rcvrs_mutex;
338 struct list_head cmd_rcvrs;
340 /* Events that were queues because no one was there to receive
342 spinlock_t events_lock; /* For dealing with event stuff. */
343 struct list_head waiting_events;
344 unsigned int waiting_events_count; /* How many events in queue? */
345 char delivering_events;
346 char event_msg_printed;
348 /* The event receiver for my BMC, only really used at panic
349 shutdown as a place to store this. */
350 unsigned char event_receiver;
351 unsigned char event_receiver_lun;
352 unsigned char local_sel_device;
353 unsigned char local_event_generator;
355 /* For handling of maintenance mode. */
356 int maintenance_mode;
357 int maintenance_mode_enable;
358 int auto_maintenance_timeout;
359 spinlock_t maintenance_mode_lock; /* Used in a timer... */
361 /* A cheap hack, if this is non-null and a message to an
362 interface comes in with a NULL user, call this routine with
363 it. Note that the message will still be freed by the
364 caller. This only works on the system interface. */
365 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
367 /* When we are scanning the channels for an SMI, this will
368 tell which channel we are scanning. */
371 /* Channel information */
372 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
375 struct proc_dir_entry *proc_dir;
376 char proc_dir_name[10];
378 atomic_t stats[IPMI_NUM_STATS];
381 * run_to_completion duplicate of smb_info, smi_info
382 * and ipmi_serial_info structures. Used to decrease numbers of
383 * parameters passed by "low" level IPMI code.
385 int run_to_completion;
387 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
390 * The driver model view of the IPMI messaging driver.
392 static struct device_driver ipmidriver = {
394 .bus = &platform_bus_type
396 static DEFINE_MUTEX(ipmidriver_mutex);
398 static LIST_HEAD(ipmi_interfaces);
399 static DEFINE_MUTEX(ipmi_interfaces_mutex);
401 /* List of watchers that want to know when smi's are added and
403 static LIST_HEAD(smi_watchers);
404 static DEFINE_MUTEX(smi_watchers_mutex);
/*
 * Statistics helpers.  "stat" is the suffix of an IPMI_STAT_xxx
 * index (token-pasted onto IPMI_STAT_) and selects one atomic
 * counter in intf->stats[].
 */
#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
413 static void free_recv_msg_list(struct list_head *q)
415 struct ipmi_recv_msg *msg, *msg2;
417 list_for_each_entry_safe(msg, msg2, q, link) {
418 list_del(&msg->link);
419 ipmi_free_recv_msg(msg);
423 static void free_smi_msg_list(struct list_head *q)
425 struct ipmi_smi_msg *msg, *msg2;
427 list_for_each_entry_safe(msg, msg2, q, link) {
428 list_del(&msg->link);
429 ipmi_free_smi_msg(msg);
/* Release everything an interface still owns at teardown time:
   queued outgoing messages, queued events, registered command
   receivers, and any receive messages parked in the sequence
   table. */
static void clean_up_interface_data(ipmi_smi_t intf)
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;
	free_smi_msg_list(&intf->waiting_msgs);
	free_recv_msg_list(&intf->waiting_events);
	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	/* After the grace period the private list is ours to free. */
	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
	/* Drop any in-flight receive messages held by sequence slots. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
/* kref release function for an interface: runs once the last
   reference is dropped and tears down the interface's data. */
static void intf_free(struct kref *ref)
	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
	clean_up_interface_data(intf);
471 struct watcher_entry {
474 struct list_head link;
/*
 * Register a watcher to be notified when interfaces come and go.
 * Already-registered interfaces are reported immediately through
 * watcher->new_smi(); the catch-up calls are made after dropping
 * ipmi_interfaces_mutex so the callback never runs under it.
 */
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;
	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		/* intf_num == -1 means the interface is not fully set up. */
		if (intf->intf_num == -1)
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		/* Hold the interface while it sits on to_deliver. */
		kref_get(&intf->refcount);
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);
	mutex_unlock(&ipmi_interfaces_mutex);
	/* Catch-up notifications, with only smi_watchers_mutex held;
	   each entry's interface reference is dropped after use. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
	mutex_unlock(&smi_watchers_mutex);
	/* Error unwind: release both locks and any references taken
	   while building to_deliver. */
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		kref_put(&e->intf->refcount, intf_free);
527 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
529 mutex_lock(&smi_watchers_mutex);
530 list_del(&(watcher->link));
531 mutex_unlock(&smi_watchers_mutex);
/*
 * Notify every registered watcher that interface @i (device @dev)
 * exists.  Must be called with smi_watchers_mutex held.
 */
call_smi_watchers(int i, struct device *dev)
	struct ipmi_smi_watcher *w;
	list_for_each_entry(w, &smi_watchers, link) {
		/* Pin the watcher's module across the callback; skip
		   watchers whose module is going away. */
		if (try_module_get(w->owner)) {
			module_put(w->owner);
552 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
554 if (addr1->addr_type != addr2->addr_type)
557 if (addr1->channel != addr2->channel)
560 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
561 struct ipmi_system_interface_addr *smi_addr1
562 = (struct ipmi_system_interface_addr *) addr1;
563 struct ipmi_system_interface_addr *smi_addr2
564 = (struct ipmi_system_interface_addr *) addr2;
565 return (smi_addr1->lun == smi_addr2->lun);
568 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
569 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
571 struct ipmi_ipmb_addr *ipmb_addr1
572 = (struct ipmi_ipmb_addr *) addr1;
573 struct ipmi_ipmb_addr *ipmb_addr2
574 = (struct ipmi_ipmb_addr *) addr2;
576 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
577 && (ipmb_addr1->lun == ipmb_addr2->lun));
580 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
581 struct ipmi_lan_addr *lan_addr1
582 = (struct ipmi_lan_addr *) addr1;
583 struct ipmi_lan_addr *lan_addr2
584 = (struct ipmi_lan_addr *) addr2;
586 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
587 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
588 && (lan_addr1->session_handle
589 == lan_addr2->session_handle)
590 && (lan_addr1->lun == lan_addr2->lun));
/* Validate a user-supplied ipmi_addr of @len bytes: it must be large
   enough for its addr_type and must name a legal channel. */
int ipmi_validate_addr(struct ipmi_addr *addr, int len)
	/* Every address must be at least as big as the smallest type. */
	if (len < sizeof(struct ipmi_system_interface_addr)) {
	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		/* SMI addresses are only valid on the BMC channel. */
		if (addr->channel != IPMI_BMC_CHANNEL)
	/* All other address types need a real, in-range channel. */
	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
	if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		/* IPMB addresses need the larger ipmi_ipmb_addr size. */
		if (len < sizeof(struct ipmi_ipmb_addr)) {
	if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
		if (len < sizeof(struct ipmi_lan_addr)) {
632 unsigned int ipmi_addr_length(int addr_type)
634 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
635 return sizeof(struct ipmi_system_interface_addr);
637 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
638 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
640 return sizeof(struct ipmi_ipmb_addr);
643 if (addr_type == IPMI_LAN_ADDR_TYPE)
644 return sizeof(struct ipmi_lan_addr);
/* Hand a completed receive message to its user's handler.  Messages
   with no user go to the interface's null_user_handler (if set) and
   are freed here; otherwise the registered handler consumes the
   message. */
static void deliver_response(struct ipmi_recv_msg *msg)
	/* For a NULL user, user_msg_data carries the interface. */
	ipmi_smi_t intf = msg->user_msg_data;
	/* Special handling for NULL users. */
	if (intf->null_user_handler) {
		intf->null_user_handler(intf, msg);
		ipmi_inc_stat(intf, handled_local_responses);
	/* No handler, so give up. */
	ipmi_inc_stat(intf, unhandled_local_responses);
	ipmi_free_recv_msg(msg);
	/* Normal case: dispatch to the user's receive handler. */
	ipmi_user_t user = msg->user;
	user->handler->ipmi_recv_hndl(msg, user->handler_data);
/* Convert an outstanding request's recv_msg into a one-byte local
   error response carrying completion code @err, then deliver it. */
deliver_err_response(struct ipmi_recv_msg *msg, int err)
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	/* Single data byte: the completion code. */
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_response(msg);
/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held. */
static int intf_next_seq(ipmi_smi_t intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
	/* Scan at most one full lap of the table, starting at the
	   slot after the one we handed out last. */
	for (i = intf->curr_seq;
	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
		if (!intf->seq_table[i].inuse)
	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;
		/* Start with the maximum timeout, when the send response
		   comes in we will start the real timer. */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		/* A fresh seqid guards against stale responses matching
		   a reused slot. */
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against message coming in after their timeout and the
   sequence number being reused). */
static int intf_find_seq(ipmi_smi_t intf,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
	/* Reject impossible sequence numbers up front. */
	if (seq >= IPMI_IPMB_NUM_SEQ)
	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
		/* Only hand the entry back when channel, command, netfn
		   and address all match the original request. */
		if ((msg->addr.channel == channel)
		    && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &(msg->addr))))
			/* Release the slot for reuse. */
			intf->seq_table[seq].inuse = 0;
	spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(ipmi_smi_t intf,
	/* Unpack slot index and sequence id from the msgid. */
	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
	spin_lock_irqsave(&(intf->seq_lock), flags);
	/* We do this verification because the user can be deleted
	   while a message is outstanding. */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid))
		struct seq_table *ent = &(intf->seq_table[seq]);
		/* Arm the real per-request timeout (the slot was parked
		   at MAX_MSG_TIMEOUT until the send completed). */
		ent->timeout = ent->orig_timeout;
	spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(ipmi_smi_t intf,
	struct ipmi_recv_msg *msg = NULL;
	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
	spin_lock_irqsave(&(intf->seq_lock), flags);
	/* We do this verification because the user can be deleted
	   while a message is outstanding. */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid))
		struct seq_table *ent = &(intf->seq_table[seq]);
	spin_unlock_irqrestore(&(intf->seq_lock), flags);
	/* Deliver the error response outside the seq_lock. */
	deliver_err_response(msg, err);
/*
 * Create a new user attached to the interface numbered @if_num and
 * hook its receive path up to @handler.  On success the user holds a
 * reference on the interface and a module reference on the lower
 * layer's handlers for its lifetime.
 */
int ipmi_create_user(unsigned int if_num,
		     struct ipmi_user_hndl *handler,
	ipmi_user_t new_user;
	/* There is no module usecount here, because it's not
	   required.  Since this can only be used by and called from
	   other modules, they will implicitly use this module, and
	   thus this can't be removed unless the other modules are
	   removed. */
	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	rv = ipmi_init_msghandler();
	/* The init code doesn't return an error if it was turned
	   off, but it won't initialize.  Check that. */
	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	/* Find the interface with the requested number. */
	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
	/* Not found, return an error */
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);
	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	/* Event delivery is opt-in via ipmi_set_gets_events(). */
	new_user->gets_events = 0;
	/* Pin the lower layer's module while this user exists. */
	if (!try_module_get(intf->handlers->owner)) {
	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		/* Lower layer refused: drop the module reference again. */
			module_put(intf->handlers->owner);
	/* Hold the lock so intf->handlers is guaranteed to be good */
	mutex_unlock(&ipmi_interfaces_mutex);
	/* seq_lock also protects the interface's RCU user list. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	/* Error unwind: give back the interface reference. */
	kref_put(&intf->refcount, intf_free);
	mutex_unlock(&ipmi_interfaces_mutex);
/* kref release function for a user: runs when the last reference to
   the ipmi_user is put. */
static void free_user(struct kref *ref)
	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
/*
 * Tear down a user: unhook it from the interface's user list and
 * sequence table, remove its command registrations, and release the
 * references taken by ipmi_create_user().
 */
int ipmi_destroy_user(ipmi_user_t user)
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user))
			/* Free any request still in flight for this user. */
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	/* Release the module/usecount pins on the lower layer. */
	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	mutex_unlock(&ipmi_interfaces_mutex);
	/* Reference taken on the interface at create time. */
	kref_put(&intf->refcount, intf_free);
	/* May free the user structure itself. */
	kref_put(&user->refcount, free_user);
978 void ipmi_get_version(ipmi_user_t user,
979 unsigned char *major,
980 unsigned char *minor)
982 *major = user->intf->ipmi_version_major;
983 *minor = user->intf->ipmi_version_minor;
986 int ipmi_set_my_address(ipmi_user_t user,
987 unsigned int channel,
988 unsigned char address)
990 if (channel >= IPMI_MAX_CHANNELS)
992 user->intf->channels[channel].address = address;
996 int ipmi_get_my_address(ipmi_user_t user,
997 unsigned int channel,
998 unsigned char *address)
1000 if (channel >= IPMI_MAX_CHANNELS)
1002 *address = user->intf->channels[channel].address;
1006 int ipmi_set_my_LUN(ipmi_user_t user,
1007 unsigned int channel,
1010 if (channel >= IPMI_MAX_CHANNELS)
1012 user->intf->channels[channel].lun = LUN & 0x3;
1016 int ipmi_get_my_LUN(ipmi_user_t user,
1017 unsigned int channel,
1018 unsigned char *address)
1020 if (channel >= IPMI_MAX_CHANNELS)
1022 *address = user->intf->channels[channel].lun;
1026 int ipmi_get_maintenance_mode(ipmi_user_t user)
1029 unsigned long flags;
1031 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1032 mode = user->intf->maintenance_mode;
1033 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1037 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
/* Push the current maintenance-mode enable flag down to the lower
   layer, if it implements set_maintenance_mode. */
static void maintenance_mode_update(ipmi_smi_t intf)
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
/*
 * Change the maintenance mode of the user's interface to @mode
 * (OFF, ON or AUTO).  In AUTO, the enable flag tracks whether the
 * auto-maintenance timeout is still running.
 */
int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
	unsigned long flags;
	ipmi_smi_t intf = user->intf;
	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	/* Only act when the mode actually changes. */
	if (intf->maintenance_mode != mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode = mode;
			/* AUTO: enabled only while the timeout is pending. */
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode = mode;
			intf->maintenance_mode_enable = 0;
		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode = mode;
			intf->maintenance_mode_enable = 1;
		/* Tell the lower layer about the new setting. */
		maintenance_mode_update(intf);
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
/*
 * Turn event reception on (@val nonzero) or off for @user.  When
 * turning it on, events already queued on the interface are handed
 * to the user; the delivering_events flag serializes concurrent
 * deliverers so events are not dispatched twice.
 */
int ipmi_set_gets_events(ipmi_user_t user, int val)
	unsigned long flags;
	ipmi_smi_t intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	INIT_LIST_HEAD(&msgs);
	spin_lock_irqsave(&intf->events_lock, flags);
	user->gets_events = val;
	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		/* Move the queued events onto a private list. */
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			printk(KERN_WARNING PFX "Event queue no longer"
			intf->event_msg_printed = 0;
		intf->delivering_events = 1;
		/* Delivery happens with events_lock dropped. */
		spin_unlock_irqrestore(&intf->events_lock, flags);
		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			kref_get(&user->refcount);
			deliver_response(msg);
		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	spin_unlock_irqrestore(&intf->events_lock, flags);
1134 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1135 unsigned char netfn,
1139 struct cmd_rcvr *rcvr;
1141 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1142 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1143 && (rcvr->chans & (1 << chan)))
1149 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1150 unsigned char netfn,
1154 struct cmd_rcvr *rcvr;
1156 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1157 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1158 && (rcvr->chans & chans))
/*
 * Register @user to receive incoming commands matching a
 * netfn/cmd/channel-mask tuple.  Only one receiver may own any given
 * channel of a tuple at a time.
 */
int ipmi_register_for_cmd(ipmi_user_t user,
			  unsigned char netfn,
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
	/* Publish under RCU so the receive path can walk the list
	   without the mutex. */
	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
	mutex_unlock(&intf->cmd_rcvrs_mutex);
/*
 * Undo ipmi_register_for_cmd() for the channels in @chans.  A
 * receiver whose channel mask drains to zero is unlinked (freed
 * later, after the RCU grace period, via the rcvrs chain).
 * Returns -ENOENT if nothing matching was registered.
 */
int ipmi_unregister_for_cmd(ipmi_user_t user,
			    unsigned char netfn,
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;
	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		/* Skip channels the caller is not unregistering. */
		if (((1 << i) & chans) == 0)
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		/* Only the owning user may unregister a receiver. */
		if (rcvr->user == user) {
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				/* No channels left: remove the receiver. */
				list_del_rcu(&rcvr->link);
	mutex_unlock(&intf->cmd_rcvrs_mutex);
/*
 * Compute the IPMB checksum over @size bytes at @data.  The checksum
 * is defined so that the modulo-256 sum of the covered bytes plus
 * the checksum byte itself equals zero (2's-complement checksum,
 * IPMI specification section 13.8).
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
/*
 * Build a Send Message request that wraps @msg in an IPMB frame
 * addressed to @ipmb_addr.  Two IPMB checksums are appended per the
 * IPMI spec.  'i' is the extra data offset used for broadcast
 * frames (presumably 1 for a broadcast's leading zero byte, else
 * 0 -- its assignment is not visible here, confirm in the full
 * function).
 */
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   unsigned char ipmb_seq,
				   unsigned char source_address,
				   unsigned char source_lun)
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	/* Broadcast frames carry a leading zero byte. */
	smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	/* Header (connection) checksum over the two bytes above. */
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;
	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
	smi_msg->data_size = msg->data_len + 9;
	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);
	/* Add on the checksum size and the offset from the
	   broadcast byte, if any. */
	smi_msg->data_size += 1 + i;
	smi_msg->msgid = msgid;
/*
 * Build a Send Message request that encapsulates @msg for delivery
 * over a LAN channel addressed by @lan_addr.  The payload is framed
 * with the standard pair of IPMB-style checksums.
 */
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
	/* Format the LAN header data (wrapped in a Send Message command). */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	/* Header (connection) checksum over the two bytes above. */
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;
	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
	smi_msg->data_size = msg->data_len + 10;
	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);
	/* Add on the checksum size. */
	smi_msg->data_size += 1;
	smi_msg->msgid = msgid;
1326 /* Separate from ipmi_request so that the user does not have to be
1327 supplied in certain circumstances (mainly at panic time). If
1328 messages are supplied, they will be freed, even if an error
/*
 * Core request-submission path shared by ipmi_request_settime() and
 * ipmi_request_supply_msgs().  Validates the destination address,
 * formats the outgoing SMI/IPMB/LAN message into an ipmi_smi_msg,
 * records enough state in the recv_msg to match the eventual
 * response, and hands the message to handlers->sender().
 * NOTE(review): this file is an excerpt with lines elided; the
 * error-exit paths and closing of this function are not visible here.
 */
1330 static int i_ipmi_request(ipmi_user_t user,
1332 struct ipmi_addr *addr,
1334 struct kernel_ipmi_msg *msg,
1335 void *user_msg_data,
1337 struct ipmi_recv_msg *supplied_recv,
1339 unsigned char source_address,
1340 unsigned char source_lun,
1342 unsigned int retry_time_ms)
1345 struct ipmi_smi_msg *smi_msg;
1346 struct ipmi_recv_msg *recv_msg;
1347 unsigned long flags;
1348 struct ipmi_smi_handlers *handlers;
/* Use caller-supplied messages when given (panic-time path), else allocate. */
1351 if (supplied_recv) {
1352 recv_msg = supplied_recv;
1354 recv_msg = ipmi_alloc_recv_msg();
1355 if (recv_msg == NULL) {
1359 recv_msg->user_msg_data = user_msg_data;
1362 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1364 smi_msg = ipmi_alloc_smi_msg();
1365 if (smi_msg == NULL) {
1366 ipmi_free_recv_msg(recv_msg);
1372 handlers = intf->handlers;
/* Pin the user for the lifetime of the pending response. */
1378 recv_msg->user = user;
1380 kref_get(&user->refcount);
1381 recv_msg->msgid = msgid;
1382 /* Store the message to send in the receive message so timeout
1383 responses can get the proper response data. */
1384 recv_msg->msg = *msg;
/* --- Case 1: message addressed directly to the local BMC (SMI). --- */
1386 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1387 struct ipmi_system_interface_addr *smi_addr;
1389 if (msg->netfn & 1) {
1390 /* Responses are not allowed to the SMI. */
1395 smi_addr = (struct ipmi_system_interface_addr *) addr;
1396 if (smi_addr->lun > 3) {
1397 ipmi_inc_stat(intf, sent_invalid_commands);
1402 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1404 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1405 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1406 || (msg->cmd == IPMI_GET_MSG_CMD)
1407 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1409 /* We don't let the user do these, since we manage
1410 the sequence numbers. */
1411 ipmi_inc_stat(intf, sent_invalid_commands);
/* Reset/firmware commands put the interface into maintenance mode
   for a while so normal traffic does not confuse a resetting BMC. */
1416 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1417 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1418 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1419 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1421 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1422 intf->auto_maintenance_timeout
1423 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1424 if (!intf->maintenance_mode
1425 && !intf->maintenance_mode_enable)
1427 intf->maintenance_mode_enable = 1;
1428 maintenance_mode_update(intf);
1430 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
/* +2 for the netfn/LUN and cmd bytes prepended below. */
1434 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1435 ipmi_inc_stat(intf, sent_invalid_commands);
1440 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1441 smi_msg->data[1] = msg->cmd;
1442 smi_msg->msgid = msgid;
1443 smi_msg->user_data = recv_msg;
1444 if (msg->data_len > 0)
1445 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1446 smi_msg->data_size = msg->data_len + 2;
1447 ipmi_inc_stat(intf, sent_local_commands);
/* --- Case 2: message to a remote node on the IPMB bus (incl. broadcast). --- */
1448 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1449 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1451 struct ipmi_ipmb_addr *ipmb_addr;
1452 unsigned char ipmb_seq;
1456 if (addr->channel >= IPMI_MAX_CHANNELS) {
1457 ipmi_inc_stat(intf, sent_invalid_commands);
1462 if (intf->channels[addr->channel].medium
1463 != IPMI_CHANNEL_MEDIUM_IPMB)
1465 ipmi_inc_stat(intf, sent_invalid_commands);
1471 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1472 retries = 0; /* Don't retry broadcasts. */
1476 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1477 /* Broadcasts add a zero at the beginning of the
1478 message, but otherwise is the same as an IPMB
1480 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1485 /* Default to 1 second retries. */
1486 if (retry_time_ms == 0)
1487 retry_time_ms = 1000;
1489 /* 9 for the header and 1 for the checksum, plus
1490 possibly one for the broadcast. */
1491 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1492 ipmi_inc_stat(intf, sent_invalid_commands);
1497 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1498 if (ipmb_addr->lun > 3) {
1499 ipmi_inc_stat(intf, sent_invalid_commands);
1504 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
/* Odd netfn == response: reuse the caller's sequence number. */
1506 if (recv_msg->msg.netfn & 0x1) {
1507 /* It's a response, so use the user's sequence
1509 ipmi_inc_stat(intf, sent_ipmb_responses);
1510 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1512 source_address, source_lun);
1514 /* Save the receive message so we can use it
1515 to deliver the response. */
1516 smi_msg->user_data = recv_msg;
1518 /* It's a command, so get a sequence for it. */
1520 spin_lock_irqsave(&(intf->seq_lock), flags);
1522 ipmi_inc_stat(intf, sent_ipmb_commands);
1524 /* Create a sequence number with a 1 second
1525 timeout and 4 retries. */
1526 rv = intf_next_seq(intf,
1534 /* We have used up all the sequence numbers,
1535 probably, so abort. */
1536 spin_unlock_irqrestore(&(intf->seq_lock),
1541 /* Store the sequence number in the message,
1542 so that when the send message response
1543 comes back we can start the timer. */
1544 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1545 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1546 ipmb_seq, broadcast,
1547 source_address, source_lun);
1549 /* Copy the message into the recv message data, so we
1550 can retransmit it later if necessary. */
1551 memcpy(recv_msg->msg_data, smi_msg->data,
1552 smi_msg->data_size);
1553 recv_msg->msg.data = recv_msg->msg_data;
1554 recv_msg->msg.data_len = smi_msg->data_size;
1556 /* We don't unlock until here, because we need
1557 to copy the completed message into the
1558 recv_msg before we release the lock.
1559 Otherwise, race conditions may bite us. I
1560 know that's pretty paranoid, but I prefer
1562 spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* --- Case 3: message routed over a LAN channel. --- */
1564 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1565 struct ipmi_lan_addr *lan_addr;
1566 unsigned char ipmb_seq;
1569 if (addr->channel >= IPMI_MAX_CHANNELS) {
1570 ipmi_inc_stat(intf, sent_invalid_commands);
1575 if ((intf->channels[addr->channel].medium
1576 != IPMI_CHANNEL_MEDIUM_8023LAN)
1577 && (intf->channels[addr->channel].medium
1578 != IPMI_CHANNEL_MEDIUM_ASYNC))
1580 ipmi_inc_stat(intf, sent_invalid_commands);
1587 /* Default to 1 second retries. */
1588 if (retry_time_ms == 0)
1589 retry_time_ms = 1000;
1591 /* 11 for the header and 1 for the checksum. */
1592 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1593 ipmi_inc_stat(intf, sent_invalid_commands);
1598 lan_addr = (struct ipmi_lan_addr *) addr;
1599 if (lan_addr->lun > 3) {
1600 ipmi_inc_stat(intf, sent_invalid_commands);
1605 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1607 if (recv_msg->msg.netfn & 0x1) {
1608 /* It's a response, so use the user's sequence
1610 ipmi_inc_stat(intf, sent_lan_responses);
1611 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1614 /* Save the receive message so we can use it
1615 to deliver the response. */
1616 smi_msg->user_data = recv_msg;
1618 /* It's a command, so get a sequence for it. */
1620 spin_lock_irqsave(&(intf->seq_lock), flags);
1622 ipmi_inc_stat(intf, sent_lan_commands);
1624 /* Create a sequence number with a 1 second
1625 timeout and 4 retries. */
1626 rv = intf_next_seq(intf,
1634 /* We have used up all the sequence numbers,
1635 probably, so abort. */
1636 spin_unlock_irqrestore(&(intf->seq_lock),
1641 /* Store the sequence number in the message,
1642 so that when the send message response
1643 comes back we can start the timer. */
1644 format_lan_msg(smi_msg, msg, lan_addr,
1645 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1646 ipmb_seq, source_lun);
1648 /* Copy the message into the recv message data, so we
1649 can retransmit it later if necessary. */
1650 memcpy(recv_msg->msg_data, smi_msg->data,
1651 smi_msg->data_size);
1652 recv_msg->msg.data = recv_msg->msg_data;
1653 recv_msg->msg.data_len = smi_msg->data_size;
1655 /* We don't unlock until here, because we need
1656 to copy the completed message into the
1657 recv_msg before we release the lock.
1658 Otherwise, race conditions may bite us. I
1659 know that's pretty paranoid, but I prefer
1661 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1664 /* Unknown address type. */
1665 ipmi_inc_stat(intf, sent_invalid_commands);
/* Debug dump of the raw outgoing bytes (presumably behind a debug
   #ifdef in the elided lines — confirm). */
1673 for (m = 0; m < smi_msg->data_size; m++)
1674 printk(" %2.2x", smi_msg->data[m]);
1679 handlers->sender(intf->send_info, smi_msg, priority);
/* Error path: release both messages; supplied ones are freed too,
   per the contract in the comment above the function. */
1686 ipmi_free_smi_msg(smi_msg);
1687 ipmi_free_recv_msg(recv_msg);
/*
 * Validate an address' channel number and look up the default
 * slave address and LUN configured for that channel on @intf.
 * Outputs via *saddr and *lun; rejects out-of-range channels.
 */
1691 static int check_addr(ipmi_smi_t intf,
1692 struct ipmi_addr *addr,
1693 unsigned char *saddr,
1696 if (addr->channel >= IPMI_MAX_CHANNELS)
1698 *lun = intf->channels[addr->channel].lun;
1699 *saddr = intf->channels[addr->channel].address;
/*
 * Public entry point: send a message with explicit retry count and
 * retry interval.  Resolves the per-channel source address/LUN via
 * check_addr() and delegates to i_ipmi_request().
 */
1703 int ipmi_request_settime(ipmi_user_t user,
1704 struct ipmi_addr *addr,
1706 struct kernel_ipmi_msg *msg,
1707 void *user_msg_data,
1710 unsigned int retry_time_ms)
1712 unsigned char saddr, lun;
1717 rv = check_addr(user->intf, addr, &saddr, &lun);
1720 return i_ipmi_request(user,
/*
 * Public entry point: like ipmi_request_settime() but the caller
 * supplies preallocated smi/recv message buffers, so this path can
 * be used when allocation is not possible (e.g. at panic time).
 */
1734 int ipmi_request_supply_msgs(ipmi_user_t user,
1735 struct ipmi_addr *addr,
1737 struct kernel_ipmi_msg *msg,
1738 void *user_msg_data,
1740 struct ipmi_recv_msg *supplied_recv,
1743 unsigned char saddr, lun;
1748 rv = check_addr(user->intf, addr, &saddr, &lun);
1751 return i_ipmi_request(user,
1765 #ifdef CONFIG_PROC_FS
/*
 * /proc read handler: print the IPMB slave address of every channel
 * as hex, space-separated, on one line.  Uses the legacy
 * read_proc_t interface (page buffer, returns byte count).
 */
1766 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1767 int count, int *eof, void *data)
1769 char *out = (char *) page;
1770 ipmi_smi_t intf = data;
1774 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1775 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1776 out[rv-1] = '\n'; /* Replace the final space with a newline */
/*
 * /proc read handler: print the BMC's IPMI version as "major.minor".
 */
1782 static int version_file_read_proc(char *page, char **start, off_t off,
1783 int count, int *eof, void *data)
1785 char *out = (char *) page;
1786 ipmi_smi_t intf = data;
1788 return sprintf(out, "%u.%u\n",
1789 ipmi_version_major(&intf->bmc->id),
1790 ipmi_version_minor(&intf->bmc->id));
/*
 * /proc read handler: dump every per-interface statistics counter,
 * one "name: value" line each, via ipmi_get_stat().  Returns the
 * number of bytes written into the page buffer.
 */
1793 static int stat_file_read_proc(char *page, char **start, off_t off,
1794 int count, int *eof, void *data)
1796 char *out = (char *) page;
1797 ipmi_smi_t intf = data;
1799 out += sprintf(out, "sent_invalid_commands: %u\n",
1800 ipmi_get_stat(intf, sent_invalid_commands));
1801 out += sprintf(out, "sent_local_commands: %u\n",
1802 ipmi_get_stat(intf, sent_local_commands));
1803 out += sprintf(out, "handled_local_responses: %u\n",
1804 ipmi_get_stat(intf, handled_local_responses));
1805 out += sprintf(out, "unhandled_local_responses: %u\n",
1806 ipmi_get_stat(intf, unhandled_local_responses));
1807 out += sprintf(out, "sent_ipmb_commands: %u\n",
1808 ipmi_get_stat(intf, sent_ipmb_commands));
1809 out += sprintf(out, "sent_ipmb_command_errs: %u\n",
1810 ipmi_get_stat(intf, sent_ipmb_command_errs));
1811 out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
1812 ipmi_get_stat(intf, retransmitted_ipmb_commands));
1813 out += sprintf(out, "timed_out_ipmb_commands: %u\n",
1814 ipmi_get_stat(intf, timed_out_ipmb_commands));
1815 out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n",
1816 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
1817 out += sprintf(out, "sent_ipmb_responses: %u\n",
1818 ipmi_get_stat(intf, sent_ipmb_responses));
1819 out += sprintf(out, "handled_ipmb_responses: %u\n",
1820 ipmi_get_stat(intf, handled_ipmb_responses));
1821 out += sprintf(out, "invalid_ipmb_responses: %u\n",
1822 ipmi_get_stat(intf, invalid_ipmb_responses));
1823 out += sprintf(out, "unhandled_ipmb_responses: %u\n",
1824 ipmi_get_stat(intf, unhandled_ipmb_responses));
1825 out += sprintf(out, "sent_lan_commands: %u\n",
1826 ipmi_get_stat(intf, sent_lan_commands));
1827 out += sprintf(out, "sent_lan_command_errs: %u\n",
1828 ipmi_get_stat(intf, sent_lan_command_errs));
1829 out += sprintf(out, "retransmitted_lan_commands: %u\n",
1830 ipmi_get_stat(intf, retransmitted_lan_commands));
1831 out += sprintf(out, "timed_out_lan_commands: %u\n",
1832 ipmi_get_stat(intf, timed_out_lan_commands));
1833 out += sprintf(out, "sent_lan_responses: %u\n",
1834 ipmi_get_stat(intf, sent_lan_responses));
1835 out += sprintf(out, "handled_lan_responses: %u\n",
1836 ipmi_get_stat(intf, handled_lan_responses));
1837 out += sprintf(out, "invalid_lan_responses: %u\n",
1838 ipmi_get_stat(intf, invalid_lan_responses));
1839 out += sprintf(out, "unhandled_lan_responses: %u\n",
1840 ipmi_get_stat(intf, unhandled_lan_responses));
1841 out += sprintf(out, "handled_commands: %u\n",
1842 ipmi_get_stat(intf, handled_commands));
1843 out += sprintf(out, "invalid_commands: %u\n",
1844 ipmi_get_stat(intf, invalid_commands));
1845 out += sprintf(out, "unhandled_commands: %u\n",
1846 ipmi_get_stat(intf, unhandled_commands));
1847 out += sprintf(out, "invalid_events: %u\n",
1848 ipmi_get_stat(intf, invalid_events));
1849 out += sprintf(out, "events: %u\n",
1850 ipmi_get_stat(intf, events));
1852 return (out - ((char *) page));
1854 #endif /* CONFIG_PROC_FS */
/*
 * Create a proc entry under the interface's proc directory and
 * remember it (name + list node) on smi->proc_entries so it can be
 * removed later by remove_proc_entries().  The list is protected by
 * smi->proc_entry_lock.  No-op (compiled out) without CONFIG_PROC_FS.
 */
1856 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1857 read_proc_t *read_proc, write_proc_t *write_proc,
1858 void *data, struct module *owner)
1861 #ifdef CONFIG_PROC_FS
1862 struct proc_dir_entry *file;
1863 struct ipmi_proc_entry *entry;
1865 /* Create a list element. */
1866 entry = kmalloc(sizeof(*entry), GFP_KERNEL)
1869 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1874 strcpy(entry->name, name);
1876 file = create_proc_entry(name, 0, smi->proc_dir);
1883 file->read_proc = read_proc;
1884 file->write_proc = write_proc;
1885 file->owner = owner;
1887 mutex_lock(&smi->proc_entry_lock);
1888 /* Stick it on the list. */
1889 entry->next = smi->proc_entries;
1890 smi->proc_entries = entry;
1891 mutex_unlock(&smi->proc_entry_lock);
1893 #endif /* CONFIG_PROC_FS */
/*
 * Create the per-interface proc directory ("/proc/ipmi/<num>",
 * given proc_ipmi_root) and populate it with the "stats", "ipmb"
 * and "version" read-only entries.
 */
1898 static int add_proc_entries(ipmi_smi_t smi, int num)
1902 #ifdef CONFIG_PROC_FS
1903 sprintf(smi->proc_dir_name, "%d", num);
1904 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1908 smi->proc_dir->owner = THIS_MODULE;
1912 rv = ipmi_smi_add_proc_entry(smi, "stats",
1913 stat_file_read_proc, NULL,
1917 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1918 ipmb_file_read_proc, NULL,
1922 rv = ipmi_smi_add_proc_entry(smi, "version",
1923 version_file_read_proc, NULL,
1925 #endif /* CONFIG_PROC_FS */
/*
 * Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry()
 * created: pop each entry off smi->proc_entries under
 * proc_entry_lock, remove its proc file, then remove the
 * per-interface directory itself.
 */
1930 static void remove_proc_entries(ipmi_smi_t smi)
1932 #ifdef CONFIG_PROC_FS
1933 struct ipmi_proc_entry *entry;
1935 mutex_lock(&smi->proc_entry_lock);
1936 while (smi->proc_entries) {
1937 entry = smi->proc_entries;
1938 smi->proc_entries = entry->next;
1940 remove_proc_entry(entry->name, smi->proc_dir);
1944 mutex_unlock(&smi->proc_entry_lock);
1945 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1946 #endif /* CONFIG_PROC_FS */
/*
 * driver_find_device() match callback: true when the bmc_device
 * attached to @dev has the 16-byte GUID passed in @data.
 */
1949 static int __find_bmc_guid(struct device *dev, void *data)
1951 unsigned char *id = data;
1952 struct bmc_device *bmc = dev_get_drvdata(dev);
1953 return memcmp(bmc->guid, id, 16) == 0;
/*
 * Look up an already-registered bmc_device by GUID among the
 * devices bound to @drv; returns its drvdata or (presumably, in the
 * elided branch) NULL when not found.
 */
1956 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1957 unsigned char *guid)
1961 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1963 return dev_get_drvdata(dev);
/* Key used to match a BMC by (product id, device id) pair. */
1968 struct prod_dev_id {
1969 unsigned int product_id;
1970 unsigned char device_id;
/*
 * driver_find_device() match callback: true when the bmc_device
 * attached to @dev has the product_id/device_id pair in @data.
 */
1973 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1975 struct prod_dev_id *id = data;
1976 struct bmc_device *bmc = dev_get_drvdata(dev);
1978 return (bmc->id.product_id == id->product_id
1979 && bmc->id.device_id == id->device_id);
/*
 * Look up an already-registered bmc_device by (product id,
 * device id) among the devices bound to @drv.
 */
1982 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1983 struct device_driver *drv,
1984 unsigned int product_id, unsigned char device_id)
1986 struct prod_dev_id id = {
1987 .product_id = product_id,
1988 .device_id = device_id,
1992 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1994 return dev_get_drvdata(dev);
/* sysfs show: BMC device id as decimal. */
1999 static ssize_t device_id_show(struct device *dev,
2000 struct device_attribute *attr,
2003 struct bmc_device *bmc = dev_get_drvdata(dev);
2005 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
/* sysfs show: bit 7 of device_revision ("provides device SDRs" flag). */
2008 static ssize_t provides_dev_sdrs_show(struct device *dev,
2009 struct device_attribute *attr,
2012 struct bmc_device *bmc = dev_get_drvdata(dev);
2014 return snprintf(buf, 10, "%u\n",
2015 (bmc->id.device_revision & 0x80) >> 7);
/* sysfs show: low nibble of device_revision. */
2018 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2021 struct bmc_device *bmc = dev_get_drvdata(dev);
2023 return snprintf(buf, 20, "%u\n",
2024 bmc->id.device_revision & 0x0F);
/* sysfs show: firmware revision as "major.minor" (minor in hex). */
2027 static ssize_t firmware_rev_show(struct device *dev,
2028 struct device_attribute *attr,
2031 struct bmc_device *bmc = dev_get_drvdata(dev);
2033 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2034 bmc->id.firmware_revision_2);
/* sysfs show: supported IPMI spec version as "major.minor". */
2037 static ssize_t ipmi_version_show(struct device *dev,
2038 struct device_attribute *attr,
2041 struct bmc_device *bmc = dev_get_drvdata(dev);
2043 return snprintf(buf, 20, "%u.%u\n",
2044 ipmi_version_major(&bmc->id),
2045 ipmi_version_minor(&bmc->id));
/* sysfs show: "additional device support" byte, hex. */
2048 static ssize_t add_dev_support_show(struct device *dev,
2049 struct device_attribute *attr,
2052 struct bmc_device *bmc = dev_get_drvdata(dev);
2054 return snprintf(buf, 10, "0x%02x\n",
2055 bmc->id.additional_device_support);
/* sysfs show: 24-bit IANA manufacturer id, hex. */
2058 static ssize_t manufacturer_id_show(struct device *dev,
2059 struct device_attribute *attr,
2062 struct bmc_device *bmc = dev_get_drvdata(dev);
2064 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/* sysfs show: 16-bit product id, hex. */
2067 static ssize_t product_id_show(struct device *dev,
2068 struct device_attribute *attr,
2071 struct bmc_device *bmc = dev_get_drvdata(dev);
2073 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/* sysfs show: the four auxiliary firmware revision bytes, most
   significant first (array printed in reverse index order). */
2076 static ssize_t aux_firmware_rev_show(struct device *dev,
2077 struct device_attribute *attr,
2080 struct bmc_device *bmc = dev_get_drvdata(dev);
2082 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2083 bmc->id.aux_firmware_revision[3],
2084 bmc->id.aux_firmware_revision[2],
2085 bmc->id.aux_firmware_revision[1],
2086 bmc->id.aux_firmware_revision[0]);
/*
 * sysfs show: BMC GUID.
 * NOTE(review): guid is a 16-byte array, but this prints only the
 * single bytes guid[0] and guid[8] — not two 8-byte halves, so most
 * of the GUID is never emitted.  Looks like a defect; verify against
 * current upstream (which formats the full GUID) before relying on
 * this output.
 */
2089 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2092 struct bmc_device *bmc = dev_get_drvdata(dev);
2094 return snprintf(buf, 100, "%Lx%Lx\n",
2095 (long long) bmc->guid[0],
2096 (long long) bmc->guid[8]);
/*
 * Remove every sysfs attribute file created by create_files(), in
 * the same order.  The aux-firmware attribute is conditional on the
 * BMC having reported one (aux_firmware_revision_set).
 */
2099 static void remove_files(struct bmc_device *bmc)
2104 device_remove_file(&bmc->dev->dev,
2105 &bmc->device_id_attr);
2106 device_remove_file(&bmc->dev->dev,
2107 &bmc->provides_dev_sdrs_attr);
2108 device_remove_file(&bmc->dev->dev,
2109 &bmc->revision_attr);
2110 device_remove_file(&bmc->dev->dev,
2111 &bmc->firmware_rev_attr);
2112 device_remove_file(&bmc->dev->dev,
2113 &bmc->version_attr);
2114 device_remove_file(&bmc->dev->dev,
2115 &bmc->add_dev_support_attr);
2116 device_remove_file(&bmc->dev->dev,
2117 &bmc->manufacturer_id_attr);
2118 device_remove_file(&bmc->dev->dev,
2119 &bmc->product_id_attr);
2121 if (bmc->id.aux_firmware_revision_set)
2122 device_remove_file(&bmc->dev->dev,
2123 &bmc->aux_firmware_rev_attr);
2125 device_remove_file(&bmc->dev->dev,
/*
 * kref release callback for a bmc_device: runs when the last
 * reference is dropped and unregisters its platform device.
 */
2130 cleanup_bmc_device(struct kref *ref)
2132 struct bmc_device *bmc;
2134 bmc = container_of(ref, struct bmc_device, refcount);
2137 platform_device_unregister(bmc->dev);
/*
 * Undo ipmi_bmc_register(): remove the two sysfs cross-links
 * between the interface device and the BMC device, free their
 * names, and drop this interface's reference on the (possibly
 * shared) bmc_device under ipmidriver_mutex.
 */
2141 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2143 struct bmc_device *bmc = intf->bmc;
2145 if (intf->sysfs_name) {
2146 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2147 kfree(intf->sysfs_name);
2148 intf->sysfs_name = NULL;
2150 if (intf->my_dev_name) {
2151 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2152 kfree(intf->my_dev_name);
2153 intf->my_dev_name = NULL;
2156 mutex_lock(&ipmidriver_mutex);
2157 kref_put(&bmc->refcount, cleanup_bmc_device);
2159 mutex_unlock(&ipmidriver_mutex);
/*
 * Initialize all of the bmc_device's sysfs attribute descriptors
 * and create the files on the BMC platform device.  Creation is
 * unwound in reverse order on failure (the classic goto-cleanup
 * ladder; some labels fall in elided lines of this excerpt).
 * The aux-firmware and guid attributes are created only when the
 * BMC actually reported those values.
 */
2162 static int create_files(struct bmc_device *bmc)
2166 bmc->device_id_attr.attr.name = "device_id";
2167 bmc->device_id_attr.attr.mode = S_IRUGO;
2168 bmc->device_id_attr.show = device_id_show;
2170 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2171 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2172 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2174 bmc->revision_attr.attr.name = "revision";
2175 bmc->revision_attr.attr.mode = S_IRUGO;
2176 bmc->revision_attr.show = revision_show;
2178 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2179 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2180 bmc->firmware_rev_attr.show = firmware_rev_show;
2182 bmc->version_attr.attr.name = "ipmi_version";
2183 bmc->version_attr.attr.mode = S_IRUGO;
2184 bmc->version_attr.show = ipmi_version_show;
2186 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2187 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2188 bmc->add_dev_support_attr.show = add_dev_support_show;
2190 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2191 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2192 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2194 bmc->product_id_attr.attr.name = "product_id";
2195 bmc->product_id_attr.attr.mode = S_IRUGO;
2196 bmc->product_id_attr.show = product_id_show;
2198 bmc->guid_attr.attr.name = "guid";
2199 bmc->guid_attr.attr.mode = S_IRUGO;
2200 bmc->guid_attr.show = guid_show;
2202 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2203 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2204 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
/* Create the files; on any failure, fall into the reverse-order
   removal ladder below. */
2206 err = device_create_file(&bmc->dev->dev,
2207 &bmc->device_id_attr);
2209 err = device_create_file(&bmc->dev->dev,
2210 &bmc->provides_dev_sdrs_attr);
2211 if (err) goto out_devid;
2212 err = device_create_file(&bmc->dev->dev,
2213 &bmc->revision_attr);
2214 if (err) goto out_sdrs;
2215 err = device_create_file(&bmc->dev->dev,
2216 &bmc->firmware_rev_attr);
2217 if (err) goto out_rev;
2218 err = device_create_file(&bmc->dev->dev,
2219 &bmc->version_attr);
2220 if (err) goto out_firm;
2221 err = device_create_file(&bmc->dev->dev,
2222 &bmc->add_dev_support_attr);
2223 if (err) goto out_version;
2224 err = device_create_file(&bmc->dev->dev,
2225 &bmc->manufacturer_id_attr);
2226 if (err) goto out_add_dev;
2227 err = device_create_file(&bmc->dev->dev,
2228 &bmc->product_id_attr);
2229 if (err) goto out_manu;
2230 if (bmc->id.aux_firmware_revision_set) {
2231 err = device_create_file(&bmc->dev->dev,
2232 &bmc->aux_firmware_rev_attr);
2233 if (err) goto out_prod_id;
2235 if (bmc->guid_set) {
2236 err = device_create_file(&bmc->dev->dev,
2238 if (err) goto out_aux_firm;
/* Error unwind: remove everything created so far, newest first. */
2244 if (bmc->id.aux_firmware_revision_set)
2245 device_remove_file(&bmc->dev->dev,
2246 &bmc->aux_firmware_rev_attr);
2248 device_remove_file(&bmc->dev->dev,
2249 &bmc->product_id_attr);
2251 device_remove_file(&bmc->dev->dev,
2252 &bmc->manufacturer_id_attr);
2254 device_remove_file(&bmc->dev->dev,
2255 &bmc->add_dev_support_attr);
2257 device_remove_file(&bmc->dev->dev,
2258 &bmc->version_attr);
2260 device_remove_file(&bmc->dev->dev,
2261 &bmc->firmware_rev_attr);
2263 device_remove_file(&bmc->dev->dev,
2264 &bmc->revision_attr);
2266 device_remove_file(&bmc->dev->dev,
2267 &bmc->provides_dev_sdrs_attr);
2269 device_remove_file(&bmc->dev->dev,
2270 &bmc->device_id_attr);
/*
 * Register the interface's BMC with the driver model.  If a
 * bmc_device with the same GUID or (product id, device id) already
 * exists, share it (kref_get) instead of registering a duplicate;
 * otherwise allocate and add a new platform device, create its
 * sysfs files, and link interface <-> BMC with a pair of symlinks.
 * All lookup/registration is serialized by ipmidriver_mutex.
 * NOTE(review): excerpt — several error returns and labels are in
 * elided lines.
 */
2275 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2276 const char *sysfs_name)
2279 struct bmc_device *bmc = intf->bmc;
2280 struct bmc_device *old_bmc;
2284 mutex_lock(&ipmidriver_mutex);
2287 * Try to find if there is an bmc_device struct
2288 * representing the interfaced BMC already
2291 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2293 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2298 * If there is already an bmc_device, free the new one,
2299 * otherwise register the new BMC device
2303 intf->bmc = old_bmc;
2306 kref_get(&bmc->refcount);
2307 mutex_unlock(&ipmidriver_mutex);
2310 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2311 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2312 bmc->id.manufacturer_id,
/* New BMC: pick a unique device id, working around firmware that
   reports identical (product, device) ids on two BMCs. */
2317 unsigned char orig_dev_id = bmc->id.device_id;
2318 int warn_printed = 0;
2320 snprintf(name, sizeof(name),
2321 "ipmi_bmc.%4.4x", bmc->id.product_id);
2323 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2325 bmc->id.device_id)) {
2326 if (!warn_printed) {
2327 printk(KERN_WARNING PFX
2328 "This machine has two different BMCs"
2329 " with the same product id and device"
2330 " id. This is an error in the"
2331 " firmware, but incrementing the"
2332 " device id to work around the problem."
2333 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2334 bmc->id.product_id, bmc->id.device_id);
2337 bmc->id.device_id++; /* Wraps at 255 */
2338 if (bmc->id.device_id == orig_dev_id) {
2340 "Out of device ids!\n");
2345 bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2347 mutex_unlock(&ipmidriver_mutex);
2350 " Unable to allocate platform device\n");
2353 bmc->dev->dev.driver = &ipmidriver;
2354 dev_set_drvdata(&bmc->dev->dev, bmc);
2355 kref_init(&bmc->refcount);
2357 rv = platform_device_add(bmc->dev);
2358 mutex_unlock(&ipmidriver_mutex);
2360 platform_device_put(bmc->dev);
2364 " Unable to register bmc device: %d\n",
2366 /* Don't go to out_err, you can only do that if
2367 the device is registered already. */
2371 rv = create_files(bmc);
2373 mutex_lock(&ipmidriver_mutex);
2374 platform_device_unregister(bmc->dev);
2375 mutex_unlock(&ipmidriver_mutex);
2381 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2382 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2383 bmc->id.manufacturer_id,
2389 * create symlink from system interface device to bmc device
2392 intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2393 if (!intf->sysfs_name) {
2396 "ipmi_msghandler: allocate link to BMC: %d\n",
2401 rv = sysfs_create_link(&intf->si_dev->kobj,
2402 &bmc->dev->dev.kobj, intf->sysfs_name);
2404 kfree(intf->sysfs_name);
2405 intf->sysfs_name = NULL;
2407 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
/* Size the "ipmiN" name with a counting snprintf before allocating. */
2412 size = snprintf(dummy, 0, "ipmi%d", ifnum);
2413 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2414 if (!intf->my_dev_name) {
2415 kfree(intf->sysfs_name);
2416 intf->sysfs_name = NULL;
2419 "ipmi_msghandler: allocate link from BMC: %d\n",
2423 snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2425 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2428 kfree(intf->sysfs_name);
2429 intf->sysfs_name = NULL;
2430 kfree(intf->my_dev_name);
2431 intf->my_dev_name = NULL;
2434 " Unable to create symlink to bmc: %d\n",
2442 ipmi_bmc_unregister(intf);
/*
 * Send a Get Device GUID command to the local BMC on the system
 * interface.  The reply is routed to the null-user handler
 * (guid_handler) set up by get_guid().
 */
2447 send_guid_cmd(ipmi_smi_t intf, int chan)
2449 struct kernel_ipmi_msg msg;
2450 struct ipmi_system_interface_addr si;
2452 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2453 si.channel = IPMI_BMC_CHANNEL;
2456 msg.netfn = IPMI_NETFN_APP_REQUEST;
2457 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2460 return i_ipmi_request(NULL,
2462 (struct ipmi_addr *) &si,
2469 intf->channels[0].address,
2470 intf->channels[0].lun,
/*
 * Null-user response handler for the Get Device GUID reply: stores
 * the 16-byte GUID in intf->bmc and flips guid_set, then wakes the
 * waiter in get_guid().  Non-matching messages are ignored (the
 * early-out in the elided lines after the first check).
 */
2475 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2477 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2478 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2479 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2483 if (msg->msg.data[0] != 0) {
2484 /* Error from getting the GUID, the BMC doesn't have one. */
2485 intf->bmc->guid_set = 0;
/* 17 = completion code + 16 GUID bytes. */
2489 if (msg->msg.data_len < 17) {
2490 intf->bmc->guid_set = 0;
2491 printk(KERN_WARNING PFX
2492 "guid_handler: The GUID response from the BMC was too"
2493 " short, it was %d but should have been 17. Assuming"
2494 " GUID is not available.\n",
2499 memcpy(intf->bmc->guid, msg->msg.data, 16);
2500 intf->bmc->guid_set = 1;
2502 wake_up(&intf->waitq);
/*
 * Synchronously fetch the BMC GUID: guid_set is parked at the
 * sentinel value 2 ("in progress"), the command is sent, and we
 * block until guid_handler() sets it to 0 (none) or 1 (valid).
 */
2506 get_guid(ipmi_smi_t intf)
2510 intf->bmc->guid_set = 0x2;
2511 intf->null_user_handler = guid_handler;
2512 rv = send_guid_cmd(intf, 0);
2514 /* Send failed, no GUID available. */
2515 intf->bmc->guid_set = 0;
2516 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2517 intf->null_user_handler = NULL;
/*
 * Send a Get Channel Info command for channel @chan to the local
 * BMC; replies are consumed by channel_handler() during the channel
 * scan at interface registration.
 */
2521 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2523 struct kernel_ipmi_msg msg;
2524 unsigned char data[1];
2525 struct ipmi_system_interface_addr si;
2527 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2528 si.channel = IPMI_BMC_CHANNEL;
2531 msg.netfn = IPMI_NETFN_APP_REQUEST;
2532 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2536 return i_ipmi_request(NULL,
2538 (struct ipmi_addr *) &si,
2545 intf->channels[0].address,
2546 intf->channels[0].lun,
/*
 * Null-user response handler driving the channel scan: records the
 * medium/protocol of the current channel, then requests the next
 * one.  The scan ends (curr_channel forced to IPMI_MAX_CHANNELS and
 * the registration thread woken) on completion, on an
 * unrecoverable error, or when the BMC does not support the
 * command at all — in which case a single IPMB channel 0 is assumed.
 */
2551 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2556 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2557 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2558 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2560 /* It's the one we want */
2561 if (msg->msg.data[0] != 0) {
2562 /* Got an error from the channel, just go on. */
2564 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2565 /* If the MC does not support this
2566 command, that is legal. We just
2567 assume it has one IPMB at channel
2569 intf->channels[0].medium
2570 = IPMI_CHANNEL_MEDIUM_IPMB;
2571 intf->channels[0].protocol
2572 = IPMI_CHANNEL_PROTOCOL_IPMB;
2575 intf->curr_channel = IPMI_MAX_CHANNELS;
2576 wake_up(&intf->waitq);
2581 if (msg->msg.data_len < 4) {
2582 /* Message not big enough, just go on. */
/* data[2]/data[3] are the medium and protocol fields of the
   Get Channel Info response. */
2585 chan = intf->curr_channel;
2586 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2587 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2590 intf->curr_channel++;
2591 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2592 wake_up(&intf->waitq);
2594 rv = send_channel_info_cmd(intf, intf->curr_channel);
2597 /* Got an error somehow, just give up. */
2598 intf->curr_channel = IPMI_MAX_CHANNELS;
2599 wake_up(&intf->waitq);
2601 printk(KERN_WARNING PFX
2602 "Error sending channel information: %d\n",
/*
 * Poll the low-level interface for the given user, if the SMI
 * driver provides a poll hook (it is optional).
 */
2610 void ipmi_poll_interface(ipmi_user_t user)
2612 ipmi_smi_t intf = user->intf;
2614 if (intf->handlers->poll)
2615 intf->handlers->poll(intf->send_info);
/*
 * Register a new low-level SMI interface with the message handler:
 * allocate and initialize the ipmi_smi structure, insert it into
 * the global interface list at the lowest free number, start the
 * low-level processing, scan the channels (IPMI >= 1.5 only),
 * create proc entries, register the BMC device, and finally notify
 * SMI watchers.  Errors unwind through the cleanup at the bottom.
 * NOTE(review): excerpt — several error checks and labels sit in
 * elided lines.
 */
2618 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2620 struct ipmi_device_id *device_id,
2621 struct device *si_dev,
2622 const char *sysfs_name,
2623 unsigned char slave_addr)
2629 struct list_head *link;
2631 /* Make sure the driver is actually initialized, this handles
2632 problems with initialization order. */
2634 rv = ipmi_init_msghandler();
2637 /* The init code doesn't return an error if it was turned
2638 off, but it won't initialize. Check that. */
2643 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2647 intf->ipmi_version_major = ipmi_version_major(device_id);
2648 intf->ipmi_version_minor = ipmi_version_minor(device_id);
2650 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2655 intf->intf_num = -1; /* Mark it invalid for now. */
2656 kref_init(&intf->refcount);
2657 intf->bmc->id = *device_id;
2658 intf->si_dev = si_dev;
/* Default every channel to the BMC slave address; LUN 2 is
   presumably the SMS message LUN — confirm against the IPMI spec. */
2659 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2660 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2661 intf->channels[j].lun = 2;
2663 if (slave_addr != 0)
2664 intf->channels[0].address = slave_addr;
2665 INIT_LIST_HEAD(&intf->users);
2666 intf->handlers = handlers;
2667 intf->send_info = send_info;
2668 spin_lock_init(&intf->seq_lock);
2669 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2670 intf->seq_table[j].inuse = 0;
2671 intf->seq_table[j].seqid = 0;
2674 #ifdef CONFIG_PROC_FS
2675 mutex_init(&intf->proc_entry_lock);
2677 spin_lock_init(&intf->waiting_msgs_lock);
2678 INIT_LIST_HEAD(&intf->waiting_msgs);
2679 spin_lock_init(&intf->events_lock);
2680 INIT_LIST_HEAD(&intf->waiting_events);
2681 intf->waiting_events_count = 0;
2682 mutex_init(&intf->cmd_rcvrs_mutex);
2683 spin_lock_init(&intf->maintenance_mode_lock);
2684 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2685 init_waitqueue_head(&intf->waitq);
2686 for (i = 0; i < IPMI_NUM_STATS; i++)
2687 atomic_set(&intf->stats[i], 0);
2689 intf->proc_dir = NULL;
2691 mutex_lock(&smi_watchers_mutex);
2692 mutex_lock(&ipmi_interfaces_mutex);
2693 /* Look for a hole in the numbers. */
2695 link = &ipmi_interfaces;
2696 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2697 if (tintf->intf_num != i) {
2698 link = &tintf->link;
2703 /* Add the new interface in numeric order. */
2705 list_add_rcu(&intf->link, &ipmi_interfaces);
2707 list_add_tail_rcu(&intf->link, link);
2709 rv = handlers->start_processing(send_info, intf);
/* IPMI 1.5+ supports Get Channel Info; older BMCs get a single
   assumed IPMB channel at 0. */
2715 if ((intf->ipmi_version_major > 1)
2716 || ((intf->ipmi_version_major == 1)
2717 && (intf->ipmi_version_minor >= 5)))
2719 /* Start scanning the channels to see what is
2721 intf->null_user_handler = channel_handler;
2722 intf->curr_channel = 0;
2723 rv = send_channel_info_cmd(intf, 0);
2727 /* Wait for the channel info to be read. */
2728 wait_event(intf->waitq,
2729 intf->curr_channel >= IPMI_MAX_CHANNELS);
2730 intf->null_user_handler = NULL;
2732 /* Assume a single IPMB channel at zero. */
2733 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2734 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2738 rv = add_proc_entries(intf, i);
2740 rv = ipmi_bmc_register(intf, i, sysfs_name);
/* Error unwind: detach from the list and drop our reference. */
2745 remove_proc_entries(intf);
2746 intf->handlers = NULL;
2747 list_del_rcu(&intf->link);
2748 mutex_unlock(&ipmi_interfaces_mutex);
2749 mutex_unlock(&smi_watchers_mutex);
2751 kref_put(&intf->refcount, intf_free);
2754 * Keep memory order straight for RCU readers. Make
2755 * sure everything else is committed to memory before
2756 * setting intf_num to mark the interface valid.
2760 mutex_unlock(&ipmi_interfaces_mutex);
2761 /* After this point the interface is legal to use. */
2762 call_smi_watchers(i, intf->si_dev);
2763 mutex_unlock(&smi_watchers_mutex);
/*
 * Flush the interface's sequence table when the interface is going
 * away: every pending request gets an IPMI_ERR_UNSPECIFIED error
 * response delivered to its owner.
 * NOTE(review): excerpt is elided here -- braces and the per-entry
 * "in use" check are not visible in this chunk.
 */
2769 static void cleanup_smi_msgs(ipmi_smi_t intf)
2772 struct seq_table *ent;
2774 /* No need for locks, the interface is down. */
2775 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2776 ent = &(intf->seq_table[i]);
2779 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
/*
 * Tear down a registered SMI interface: unregister the BMC device,
 * mark the interface invalid (intf_num = -1) and unlink it from the
 * global RCU list under ipmi_interfaces_mutex, flush in-flight
 * messages, remove the proc entries, tell every watcher the
 * interface is gone, and drop the final reference.
 */
2783 int ipmi_unregister_smi(ipmi_smi_t intf)
2785 struct ipmi_smi_watcher *w;
2786 int intf_num = intf->intf_num;
2788 ipmi_bmc_unregister(intf);
2790 mutex_lock(&smi_watchers_mutex);
2791 mutex_lock(&ipmi_interfaces_mutex);
/* intf_num == -1 marks the interface dead for code that races with us. */
2792 intf->intf_num = -1;
2793 intf->handlers = NULL;
2794 list_del_rcu(&intf->link);
2795 mutex_unlock(&ipmi_interfaces_mutex);
2798 cleanup_smi_msgs(intf);
2800 remove_proc_entries(intf);
2802 /* Call all the watcher interfaces to tell them that
2803 an interface is gone. */
2804 list_for_each_entry(w, &smi_watchers, link)
2805 w->smi_gone(intf_num);
2806 mutex_unlock(&smi_watchers_mutex);
2808 kref_put(&intf->refcount, intf_free);
/*
 * Handle an IPMB response read out of the BMC's receive queue:
 * validate length and completion code, reconstruct the remote IPMB
 * source address, look up the pending request by sequence number
 * (intf_find_seq) and deliver the response to the originating user.
 */
2812 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2813 struct ipmi_smi_msg *msg)
2815 struct ipmi_ipmb_addr ipmb_addr;
2816 struct ipmi_recv_msg *recv_msg;
2819 /* This is 11, not 10, because the response must contain a
2820 * completion code. */
2821 if (msg->rsp_size < 11) {
2822 /* Message not big enough, just ignore it. */
2823 ipmi_inc_stat(intf, invalid_ipmb_responses);
2827 if (msg->rsp[2] != 0) {
2828 /* An error getting the response, just ignore it. */
2832 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2833 ipmb_addr.slave_addr = msg->rsp[6];
2834 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2835 ipmb_addr.lun = msg->rsp[7] & 3;
2837 /* It's a response from a remote entity. Look up the sequence
2838 number and handle the response. */
2839 if (intf_find_seq(intf,
/* Mask off the response bit so the netfn matches the request's netfn. */
2843 (msg->rsp[4] >> 2) & (~1),
2844 (struct ipmi_addr *) &(ipmb_addr),
2847 /* We were unable to find the sequence number,
2848 so just nuke the message. */
2849 ipmi_inc_stat(intf, unhandled_ipmb_responses);
2853 memcpy(recv_msg->msg_data,
2856 /* The other fields matched, so no need to set them, except
2857 for netfn, which needs to be the response that was
2858 returned, not the request value. */
2859 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2860 recv_msg->msg.data = recv_msg->msg_data;
/* Chop the leading header and the trailing checksum byte. */
2861 recv_msg->msg.data_len = msg->rsp_size - 10;
2862 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2863 ipmi_inc_stat(intf, handled_ipmb_responses);
2864 deliver_response(recv_msg);
/*
 * Handle an incoming IPMB command addressed to us.  If a user has
 * registered for the (netfn, cmd, chan) triple, build a recv_msg and
 * deliver it; otherwise synthesize an "invalid command" IPMB error
 * response and send it back through the SMI via a Send Message
 * command.
 */
2869 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2870 struct ipmi_smi_msg *msg)
2872 struct cmd_rcvr *rcvr;
2874 unsigned char netfn;
2877 ipmi_user_t user = NULL;
2878 struct ipmi_ipmb_addr *ipmb_addr;
2879 struct ipmi_recv_msg *recv_msg;
2880 struct ipmi_smi_handlers *handlers;
2882 if (msg->rsp_size < 10) {
2883 /* Message not big enough, just ignore it. */
2884 ipmi_inc_stat(intf, invalid_commands);
2888 if (msg->rsp[2] != 0) {
2889 /* An error getting the response, just ignore it. */
2893 netfn = msg->rsp[4] >> 2;
2895 chan = msg->rsp[3] & 0xf;
/* Look up a registered receiver; take a ref on its user if found. */
2898 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2901 kref_get(&user->refcount);
2907 /* We didn't find a user, deliver an error response. */
2908 ipmi_inc_stat(intf, unhandled_commands);
/* Build a Send Message request wrapping an IPMB error response. */
2910 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2911 msg->data[1] = IPMI_SEND_MSG_CMD;
2912 msg->data[2] = msg->rsp[3];
2913 msg->data[3] = msg->rsp[6];
/* netfn+1 turns the request netfn into the corresponding response. */
2914 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2915 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2916 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2918 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2919 msg->data[8] = msg->rsp[8]; /* cmd */
2920 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2921 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2922 msg->data_size = 11;
/* Debug dump; NOTE(review): no KERN_ level on this printk. */
2927 printk("Invalid command:");
2928 for (m = 0; m < msg->data_size; m++)
2929 printk(" %2.2x", msg->data[m]);
2934 handlers = intf->handlers;
2936 handlers->sender(intf->send_info, msg, 0);
2937 /* We used the message, so return the value
2938 that causes it to not be freed or
2944 /* Deliver the message to the user. */
2945 ipmi_inc_stat(intf, handled_commands);
2947 recv_msg = ipmi_alloc_recv_msg();
2949 /* We couldn't allocate memory for the
2950 message, so requeue it for handling
/* Drop the user ref taken above before bailing out. */
2953 kref_put(&user->refcount, free_user);
2955 /* Extract the source address from the data. */
2956 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2957 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2958 ipmb_addr->slave_addr = msg->rsp[6];
2959 ipmb_addr->lun = msg->rsp[7] & 3;
2960 ipmb_addr->channel = msg->rsp[3] & 0xf;
2962 /* Extract the rest of the message information
2963 from the IPMB header.*/
2964 recv_msg->user = user;
2965 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2966 recv_msg->msgid = msg->rsp[7] >> 2;
2967 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2968 recv_msg->msg.cmd = msg->rsp[8];
2969 recv_msg->msg.data = recv_msg->msg_data;
2971 /* We chop off 10, not 9 bytes because the checksum
2972 at the end also needs to be removed. */
2973 recv_msg->msg.data_len = msg->rsp_size - 10;
2974 memcpy(recv_msg->msg_data,
2976 msg->rsp_size - 10);
2977 deliver_response(recv_msg);
/*
 * Handle a LAN-channel response read out of the BMC's receive queue.
 * Same shape as handle_ipmb_get_msg_rsp, but the header is two bytes
 * longer (session handle and SWIDs), hence the different offsets.
 */
2984 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2985 struct ipmi_smi_msg *msg)
2987 struct ipmi_lan_addr lan_addr;
2988 struct ipmi_recv_msg *recv_msg;
2991 /* This is 13, not 12, because the response must contain a
2992 * completion code. */
2993 if (msg->rsp_size < 13) {
2994 /* Message not big enough, just ignore it. */
2995 ipmi_inc_stat(intf, invalid_lan_responses);
2999 if (msg->rsp[2] != 0) {
3000 /* An error getting the response, just ignore it. */
3004 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3005 lan_addr.session_handle = msg->rsp[4];
3006 lan_addr.remote_SWID = msg->rsp[8];
3007 lan_addr.local_SWID = msg->rsp[5];
3008 lan_addr.channel = msg->rsp[3] & 0x0f;
3009 lan_addr.privilege = msg->rsp[3] >> 4;
3010 lan_addr.lun = msg->rsp[9] & 3;
3012 /* It's a response from a remote entity. Look up the sequence
3013 number and handle the response. */
3014 if (intf_find_seq(intf,
/* Mask off the response bit so the netfn matches the request's netfn. */
3018 (msg->rsp[6] >> 2) & (~1),
3019 (struct ipmi_addr *) &(lan_addr),
3022 /* We were unable to find the sequence number,
3023 so just nuke the message. */
3024 ipmi_inc_stat(intf, unhandled_lan_responses);
3028 memcpy(recv_msg->msg_data,
3030 msg->rsp_size - 11);
3031 /* The other fields matched, so no need to set them, except
3032 for netfn, which needs to be the response that was
3033 returned, not the request value. */
3034 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3035 recv_msg->msg.data = recv_msg->msg_data;
3036 recv_msg->msg.data_len = msg->rsp_size - 12;
3037 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3038 ipmi_inc_stat(intf, handled_lan_responses);
3039 deliver_response(recv_msg);
/*
 * Handle an incoming command from a LAN channel.  If a user has
 * registered for the (netfn, cmd, chan) triple, build a recv_msg and
 * deliver it; unlike the IPMB path, unhandled LAN commands are simply
 * dropped (no error response is generated).
 */
3044 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3045 struct ipmi_smi_msg *msg)
3047 struct cmd_rcvr *rcvr;
3049 unsigned char netfn;
3052 ipmi_user_t user = NULL;
3053 struct ipmi_lan_addr *lan_addr;
3054 struct ipmi_recv_msg *recv_msg;
3056 if (msg->rsp_size < 12) {
3057 /* Message not big enough, just ignore it. */
3058 ipmi_inc_stat(intf, invalid_commands);
3062 if (msg->rsp[2] != 0) {
3063 /* An error getting the response, just ignore it. */
3067 netfn = msg->rsp[6] >> 2;
3069 chan = msg->rsp[3] & 0xf;
/* Look up a registered receiver; take a ref on its user if found. */
3072 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3075 kref_get(&user->refcount);
3081 /* We didn't find a user, just give up. */
3082 ipmi_inc_stat(intf, unhandled_commands);
3084 rv = 0; /* Don't do anything with these messages, just
3085 allow them to be freed. */
3087 /* Deliver the message to the user. */
3088 ipmi_inc_stat(intf, handled_commands);
3090 recv_msg = ipmi_alloc_recv_msg();
3092 /* We couldn't allocate memory for the
3093 message, so requeue it for handling
/* Drop the user ref taken above before bailing out. */
3096 kref_put(&user->refcount, free_user);
3098 /* Extract the source address from the data. */
3099 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3100 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3101 lan_addr->session_handle = msg->rsp[4];
3102 lan_addr->remote_SWID = msg->rsp[8];
3103 lan_addr->local_SWID = msg->rsp[5];
3104 lan_addr->lun = msg->rsp[9] & 3;
3105 lan_addr->channel = msg->rsp[3] & 0xf;
3106 lan_addr->privilege = msg->rsp[3] >> 4;
3108 /* Extract the rest of the message information
3109 from the LAN message header. */
3110 recv_msg->user = user;
3111 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3112 recv_msg->msgid = msg->rsp[9] >> 2;
3113 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3114 recv_msg->msg.cmd = msg->rsp[10];
3115 recv_msg->msg.data = recv_msg->msg_data;
3117 /* We chop off 12, not 11 bytes because the checksum
3118 at the end also needs to be removed. */
3119 recv_msg->msg.data_len = msg->rsp_size - 12;
3120 memcpy(recv_msg->msg_data,
3122 msg->rsp_size - 12);
3123 deliver_response(recv_msg);
/*
 * Populate a recv_msg as an asynchronous event: source address is the
 * system interface (BMC channel), recv_type is ASYNC_EVENT, and the
 * event payload (everything after the completion code) is copied into
 * the message data buffer.
 */
3130 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3131 struct ipmi_smi_msg *msg)
3133 struct ipmi_system_interface_addr *smi_addr;
3135 recv_msg->msgid = 0;
3136 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3137 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3138 smi_addr->channel = IPMI_BMC_CHANNEL;
3139 smi_addr->lun = msg->rsp[0] & 3;
3140 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3141 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3142 recv_msg->msg.cmd = msg->rsp[1];
/* Skip netfn/lun, cmd and completion code; keep the event body. */
3143 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3144 recv_msg->msg.data = recv_msg->msg_data;
3145 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle a Read Event Message Buffer response (an async event).
 * Fan the event out to every user that enabled event reception; if
 * nobody wants it, queue it (bounded by MAX_EVENTS_IN_QUEUE) for a
 * future consumer, warning once when the queue overflows.
 */
3148 static int handle_read_event_rsp(ipmi_smi_t intf,
3149 struct ipmi_smi_msg *msg)
3151 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3152 struct list_head msgs;
3155 int deliver_count = 0;
3156 unsigned long flags;
3158 if (msg->rsp_size < 19) {
3159 /* Message is too small to be an IPMB event. */
3160 ipmi_inc_stat(intf, invalid_events);
3164 if (msg->rsp[2] != 0) {
3165 /* An error getting the event, just ignore it. */
3169 INIT_LIST_HEAD(&msgs);
/* events_lock serializes event delivery and the waiting-events queue. */
3171 spin_lock_irqsave(&intf->events_lock, flags);
3173 ipmi_inc_stat(intf, events);
3175 /* Allocate and fill in one message for every user that is getting
3178 list_for_each_entry_rcu(user, &intf->users, link) {
3179 if (!user->gets_events)
3182 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed: unwind everything built so far and requeue. */
3185 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3187 list_del(&recv_msg->link);
3188 ipmi_free_recv_msg(recv_msg);
3190 /* We couldn't allocate memory for the
3191 message, so requeue it for handling
3199 copy_event_into_recv_msg(recv_msg, msg);
3200 recv_msg->user = user;
/* Each queued copy holds its own reference on the user. */
3201 kref_get(&user->refcount);
3202 list_add_tail(&(recv_msg->link), &msgs);
3206 if (deliver_count) {
3207 /* Now deliver all the messages. */
3208 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3209 list_del(&recv_msg->link);
3210 deliver_response(recv_msg);
3212 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3213 /* No one to receive the message, put it in queue if there's
3214 not already too many things in the queue. */
3215 recv_msg = ipmi_alloc_recv_msg();
3217 /* We couldn't allocate memory for the
3218 message, so requeue it for handling
3224 copy_event_into_recv_msg(recv_msg, msg);
3225 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3226 intf->waiting_events_count++;
3227 } else if (!intf->event_msg_printed) {
3228 /* There's too many things in the queue, discard this
/* Warn only once per overflow episode to avoid log spam. */
3230 printk(KERN_WARNING PFX "Event queue full, discarding"
3231 " incoming events\n");
3232 intf->event_msg_printed = 1;
3236 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC: recover the pending recv_msg
 * stashed in msg->user_data, fill in the system-interface source
 * address and response fields, and deliver it to the owning user.
 * If the owner has since been destroyed, the response is counted as
 * unhandled and freed.
 * Fix: corrected "vender" -> "vendor" in the user-visible warning.
 */
3241 static int handle_bmc_rsp(ipmi_smi_t intf,
3242 struct ipmi_smi_msg *msg)
3244 struct ipmi_recv_msg *recv_msg;
3245 struct ipmi_user *user;
3247 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3248 if (recv_msg == NULL)
3250 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3251 "could be because of a malformed message, or\n"
3252 "because of a hardware error. Contact your\n"
3253 "hardware vendor for assistance\n");
3257 user = recv_msg->user;
3258 /* Make sure the user still exists. */
3259 if (user && !user->valid) {
3260 /* The user for the message went away, so give up. */
3261 ipmi_inc_stat(intf, unhandled_local_responses);
3262 ipmi_free_recv_msg(recv_msg);
3264 struct ipmi_system_interface_addr *smi_addr;
3266 ipmi_inc_stat(intf, handled_local_responses);
3267 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3268 recv_msg->msgid = msg->msgid;
3269 smi_addr = ((struct ipmi_system_interface_addr *)
3271 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3272 smi_addr->channel = IPMI_BMC_CHANNEL;
3273 smi_addr->lun = msg->rsp[0] & 3;
3274 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3275 recv_msg->msg.cmd = msg->rsp[1];
3276 memcpy(recv_msg->msg_data,
3279 recv_msg->msg.data = recv_msg->msg_data;
/* Strip the netfn/lun and cmd bytes; keep completion code + data. */
3280 recv_msg->msg.data_len = msg->rsp_size - 2;
3281 deliver_response(recv_msg);
/* Handle a new message. Return 1 if the message should be requeued,
 0 if the message should be freed, or -1 if the message should not
 be freed or requeued. */
/*
 * Central dispatch for a message coming up from the SMI: sanity-check
 * the response against the request, then route to the send-message,
 * get-message (IPMB/LAN), read-event or local-BMC handler.
 * Fix: corrected "to small" -> "too small" in the warning text.
 */
3290 static int handle_new_recv_msg(ipmi_smi_t intf,
3291 struct ipmi_smi_msg *msg)
3299 for (m = 0; m < msg->rsp_size; m++)
3300 printk(" %2.2x", msg->rsp[m]);
3303 if (msg->rsp_size < 2) {
3304 /* Message is too small to be correct. */
3305 printk(KERN_WARNING PFX "BMC returned too small a message"
3306 " for netfn %x cmd %x, got %d bytes\n",
3307 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3309 /* Generate an error response for the message. */
3310 msg->rsp[0] = msg->data[0] | (1 << 2);
3311 msg->rsp[1] = msg->data[1];
3312 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3314 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3315 || (msg->rsp[1] != msg->data[1])) /* Command */
3317 /* The response is not even marginally correct. */
3318 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3319 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3320 (msg->data[0] >> 2) | 1, msg->data[1],
3321 msg->rsp[0] >> 2, msg->rsp[1]);
3323 /* Generate an error response for the message. */
3324 msg->rsp[0] = msg->data[0] | (1 << 2);
3325 msg->rsp[1] = msg->data[1];
3326 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3330 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3331 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3332 && (msg->user_data != NULL))
3334 /* It's a response to a response we sent. For this we
3335 deliver a send message response to the user. */
3336 struct ipmi_recv_msg *recv_msg = msg->user_data;
3339 if (msg->rsp_size < 2)
3340 /* Message is too small to be correct. */
3343 chan = msg->data[2] & 0x0f;
3344 if (chan >= IPMI_MAX_CHANNELS)
3345 /* Invalid channel number */
3351 /* Make sure the user still exists. */
3352 if (!recv_msg->user || !recv_msg->user->valid)
3355 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3356 recv_msg->msg.data = recv_msg->msg_data;
/* The only payload is the completion code of the send. */
3357 recv_msg->msg.data_len = 1;
3358 recv_msg->msg_data[0] = msg->rsp[2];
3359 deliver_response(recv_msg);
3360 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3361 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3363 /* It's from the receive queue. */
3364 chan = msg->rsp[3] & 0xf;
3365 if (chan >= IPMI_MAX_CHANNELS) {
3366 /* Invalid channel number */
/* Dispatch on the channel medium to pick the header layout. */
3371 switch (intf->channels[chan].medium) {
3372 case IPMI_CHANNEL_MEDIUM_IPMB:
3373 if (msg->rsp[4] & 0x04) {
3374 /* It's a response, so find the
3375 requesting message and send it up. */
3376 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3378 /* It's a command to the SMS from some other
3379 entity. Handle that. */
3380 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3384 case IPMI_CHANNEL_MEDIUM_8023LAN:
3385 case IPMI_CHANNEL_MEDIUM_ASYNC:
3386 if (msg->rsp[6] & 0x04) {
3387 /* It's a response, so find the
3388 requesting message and send it up. */
3389 requeue = handle_lan_get_msg_rsp(intf, msg);
3391 /* It's a command to the SMS from some other
3392 entity. Handle that. */
3393 requeue = handle_lan_get_msg_cmd(intf, msg);
3398 /* We don't handle the channel type, so just
3399 * free the message. */
3403 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3404 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3406 /* It's an asynchronous event. */
3407 requeue = handle_read_event_rsp(intf, msg);
3409 /* It's a response from the local BMC. */
3410 requeue = handle_bmc_rsp(intf, msg);
/* Handle a new message from the lower layer. */
/*
 * Entry point called by the SMI driver for every completed message.
 * Local Send Message responses start (or error out) the sequence
 * timer and are freed here.  Everything else goes through
 * handle_new_recv_msg(), with the waiting_msgs list used to preserve
 * delivery order when a message cannot be handled immediately.
 */
3418 void ipmi_smi_msg_received(ipmi_smi_t intf,
3419 struct ipmi_smi_msg *msg)
3421 unsigned long flags = 0; /* keep us warning-free. */
3423 int run_to_completion;
3426 if ((msg->data_size >= 2)
3427 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3428 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3429 && (msg->user_data == NULL))
3431 /* This is the local response to a command send, start
3432 the timer for these. The user_data will not be
3433 NULL if this is a response send, and we will let
3434 response sends just go through. */
3436 /* Check for errors, if we get certain errors (ones
3437 that mean basically we can try again later), we
3438 ignore them and start the timer. Otherwise we
3439 report the error immediately. */
3440 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3441 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3442 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3443 && (msg->rsp[2] != IPMI_BUS_ERR)
3444 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3446 int chan = msg->rsp[3] & 0xf;
3448 /* Got an error sending the message, handle it. */
3449 if (chan >= IPMI_MAX_CHANNELS)
3450 ; /* This shouldn't happen */
3451 else if ((intf->channels[chan].medium
3452 == IPMI_CHANNEL_MEDIUM_8023LAN)
3453 || (intf->channels[chan].medium
3454 == IPMI_CHANNEL_MEDIUM_ASYNC))
3455 ipmi_inc_stat(intf, sent_lan_command_errs);
3457 ipmi_inc_stat(intf, sent_ipmb_command_errs);
/* Fail the pending sequence entry with the BMC's completion code. */
3458 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3460 /* The message was sent, start the timer. */
3461 intf_start_seq_timer(intf, msg->msgid);
3464 ipmi_free_smi_msg(msg);
3468 /* To preserve message order, if the list is not empty, we
3469 tack this message onto the end of the list. */
/* run_to_completion (panic path) means no locking is allowed. */
3470 run_to_completion = intf->run_to_completion;
3471 if (!run_to_completion)
3472 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3473 if (!list_empty(&intf->waiting_msgs)) {
3474 list_add_tail(&msg->link, &intf->waiting_msgs);
3475 if (!run_to_completion)
3476 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3479 if (!run_to_completion)
3480 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3482 rv = handle_new_recv_msg(intf, msg);
3484 /* Could not handle the message now, just add it to a
3485 list to handle later. */
3486 run_to_completion = intf->run_to_completion;
3487 if (!run_to_completion)
3488 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3489 list_add_tail(&msg->link, &intf->waiting_msgs);
3490 if (!run_to_completion)
3491 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3492 } else if (rv == 0) {
3493 ipmi_free_smi_msg(msg);
/*
 * Notify every user on this interface that registered a watchdog
 * pretimeout callback.  Users without the callback are skipped.
 */
3500 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3505 list_for_each_entry_rcu(user, &intf->users, link) {
3506 if (!user->handler->ipmi_watchdog_pretimeout)
3509 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * Build a fresh SMI message for retransmitting recv_msg, re-encoding
 * the (seq, seqid) pair into the msgid.  Returns NULL on allocation
 * failure; callers treat that as "skip this retry" since more retries
 * remain.
 */
3515 static struct ipmi_smi_msg *
3516 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3517 unsigned char seq, long seqid)
3519 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3521 /* If we can't allocate the message, then just return, we
3522 get 4 retries, so this should be ok. */
3525 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3526 smi_msg->data_size = recv_msg->msg.data_len;
3527 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
/* Debug dump of the rebuilt message bytes. */
3533 for (m = 0; m < smi_msg->data_size; m++)
3534 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period.  On expiry: if out
 * of retries, move the recv_msg onto the caller's timeouts list (the
 * caller delivers the errors outside the lock); otherwise rebuild the
 * SMI message and retransmit it.  Called with intf->seq_lock held via
 * *flags; the lock is dropped around the sender call and reacquired.
 */
3541 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3542 struct list_head *timeouts, long timeout_period,
3543 int slot, unsigned long *flags)
3545 struct ipmi_recv_msg *msg;
3546 struct ipmi_smi_handlers *handlers;
/* Interface is being torn down; nothing to do. */
3548 if (intf->intf_num == -1)
3554 ent->timeout -= timeout_period;
3555 if (ent->timeout > 0)
3558 if (ent->retries_left == 0) {
3559 /* The message has used all its retries. */
3561 msg = ent->recv_msg;
3562 list_add_tail(&msg->link, timeouts);
3564 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
3565 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3566 ipmi_inc_stat(intf, timed_out_lan_commands);
3568 ipmi_inc_stat(intf, timed_out_ipmb_commands);
3570 struct ipmi_smi_msg *smi_msg;
3571 /* More retries, send again. */
3573 /* Start with the max timer, set to normal
3574 timer after the message is sent. */
3575 ent->timeout = MAX_MSG_TIMEOUT;
3576 ent->retries_left--;
3577 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3578 ipmi_inc_stat(intf, retransmitted_lan_commands);
3580 ipmi_inc_stat(intf, retransmitted_ipmb_commands);
3582 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop seq_lock while calling into the SMI driver's sender. */
3587 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3589 /* Send the new message. We send with a zero
3590 * priority. It timed out, I doubt time is
3591 * that critical now, and high priority
3592 * messages are really only for messages to the
3593 * local MC, which don't get resent. */
3594 handlers = intf->handlers;
3596 intf->handlers->sender(intf->send_info,
3599 ipmi_free_smi_msg(smi_msg);
3601 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic work done for every registered interface: retry any
 * queued waiting messages (stopping at the first that still can't be
 * handled, to preserve order), age the sequence table and deliver
 * timeout errors, and count down the auto-maintenance-mode window.
 */
3605 static void ipmi_timeout_handler(long timeout_period)
3608 struct list_head timeouts;
3609 struct ipmi_recv_msg *msg, *msg2;
3610 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3611 unsigned long flags;
3615 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3616 /* See if any waiting messages need to be processed. */
3617 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3618 list_for_each_entry_safe(smi_msg, smi_msg2,
3619 &intf->waiting_msgs, link) {
3620 if (!handle_new_recv_msg(intf, smi_msg)) {
3621 list_del(&smi_msg->link);
3622 ipmi_free_smi_msg(smi_msg);
3624 /* To preserve message order, quit if we
3625 can't handle a message. */
3629 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3631 /* Go through the seq table and find any messages that
3632 have timed out, putting them in the timeouts
3634 INIT_LIST_HEAD(&timeouts);
3635 spin_lock_irqsave(&intf->seq_lock, flags);
3636 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3637 check_msg_timeout(intf, &(intf->seq_table[i]),
3638 &timeouts, timeout_period, i,
3640 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Deliver the timeout errors outside seq_lock. */
3642 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3643 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3646 * Maintenance mode handling. Check the timeout
3647 * optimistically before we claim the lock. It may
3648 * mean a timeout gets missed occasionally, but that
3649 * only means the timeout gets extended by one period
3650 * in that case. No big deal, and it avoids the lock
3653 if (intf->auto_maintenance_timeout > 0) {
/* Re-check under the lock; the first test was unlocked. */
3654 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3655 if (intf->auto_maintenance_timeout > 0) {
3656 intf->auto_maintenance_timeout
3658 if (!intf->maintenance_mode
3659 && (intf->auto_maintenance_timeout <= 0))
3661 intf->maintenance_mode_enable = 0;
3662 maintenance_mode_update(intf);
3665 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
/*
 * Ask each interface's SMI driver to poll for pending events.
 * Skipped while an interface is in maintenance mode.
 */
3672 static void ipmi_request_event(void)
3675 struct ipmi_smi_handlers *handlers;
3678 /* Called from the timer, no need to check if handlers is
3680 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3681 /* No event requests when in maintenance mode. */
3682 if (intf->maintenance_mode_enable)
3685 handlers = intf->handlers;
3687 handlers->request_events(intf->send_info);
/* Periodic housekeeping timer driving timeouts and event polling. */
3692 static struct timer_list ipmi_timer;
3694 /* Call every ~100 ms. */
3695 #define IPMI_TIMEOUT_TIME 100
3697 /* How many jiffies does it take to get to the timeout time. */
3698 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3700 /* Request events from the queue every second (this is the number of
3701 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3702 future, IPMI will add a way to know immediately if an event is in
3703 the queue and this silliness can go away. */
3704 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* Set non-zero at module cleanup so the timer stops rearming itself. */
3706 static atomic_t stop_operation;
3707 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback: every IPMI_REQUEST_EV_TIME ticks poll for events,
 * always run the timeout handler, then rearm unless stopping.
 */
3709 static void ipmi_timeout(unsigned long data)
3711 if (atomic_read(&stop_operation))
3715 if (ticks_to_req_ev == 0) {
3716 ipmi_request_event();
3717 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3720 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3722 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Leak-detection counters, reported at module exit. */
3726 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3727 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3729 /* FIXME - convert these to slabs. */
/* Destructor installed as msg->done by ipmi_alloc_smi_msg(). */
3730 static void free_smi_msg(struct ipmi_smi_msg *msg)
3732 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an SMI message (GFP_ATOMIC -- callable from interrupt
 * context) with its done handler and in-use accounting set up.
 * Returns NULL on allocation failure.
 */
3736 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3738 struct ipmi_smi_msg *rv;
3739 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3741 rv->done = free_smi_msg;
3742 rv->user_data = NULL;
3743 atomic_inc(&smi_msg_inuse_count);
/* Destructor installed as msg->done by ipmi_alloc_recv_msg(). */
3748 static void free_recv_msg(struct ipmi_recv_msg *msg)
3750 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a receive message (GFP_ATOMIC) with its done handler and
 * in-use accounting set up.  Returns NULL on allocation failure.
 */
3754 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3756 struct ipmi_recv_msg *rv;
3758 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3761 rv->done = free_recv_msg;
3762 atomic_inc(&recv_msg_inuse_count);
/* Free a receive message, dropping the reference it held on its user. */
3767 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3770 kref_put(&msg->user->refcount, free_user);
3774 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done handlers for the stack-allocated messages used while
 panicking -- those messages must never be freed. */
3776 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3780 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3784 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic: capture the Get Event Receiver
 * response (slave address + LUN) into the interface.
 */
3785 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3787 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3788 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3789 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3790 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3792 /* A get event receiver command, save it. */
3793 intf->event_receiver = msg->msg.data[1];
3794 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic: capture the Get Device ID
 * response, recording whether the local MC is an SEL device and an
 * event generator (bits of the Additional Device Support byte).
 */
3798 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3800 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3801 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3802 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3803 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3805 /* A get device id command, save if we are an event
3806 receiver or generator. */
3807 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3808 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * Best-effort panic reporting.  Phase 1: send an "OS Critical Stop"
 * platform event on every registered interface.  Phase 2 (with
 * CONFIG_IPMI_PANIC_STRING): discover each interface's event receiver
 * or local SEL, then write the panic string into the SEL as a series
 * of OEM records, 11 bytes of string per record.  Uses stack message
 * buffers with no-op done handlers since kmalloc may be unusable
 * during a panic.
 */
3813 static void send_panic_events(char *str)
3815 struct kernel_ipmi_msg msg;
3817 unsigned char data[16];
3818 struct ipmi_system_interface_addr *si;
3819 struct ipmi_addr addr;
3820 struct ipmi_smi_msg smi_msg;
3821 struct ipmi_recv_msg recv_msg;
3823 si = (struct ipmi_system_interface_addr *) &addr;
3824 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3825 si->channel = IPMI_BMC_CHANNEL;
3828 /* Fill in an event telling that we have failed. */
3829 msg.netfn = 0x04; /* Sensor or Event. */
3830 msg.cmd = 2; /* Platform event command. */
3833 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3834 data[1] = 0x03; /* This is for IPMI 1.0. */
3835 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3836 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3837 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3839 /* Put a few breadcrumbs in. Hopefully later we can add more things
3840 to make the panic events more useful. */
/* Stack messages must not be freed: use no-op done handlers. */
3847 smi_msg.done = dummy_smi_done_handler;
3848 recv_msg.done = dummy_recv_done_handler;
3850 /* For every registered interface, send the event. */
3851 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3852 if (!intf->handlers)
3853 /* Interface is not ready. */
/* Force synchronous, lock-free operation for the panic path. */
3856 intf->run_to_completion = 1;
3857 /* Send the event announcing the panic. */
3858 intf->handlers->set_run_to_completion(intf->send_info, 1);
3859 i_ipmi_request(NULL,
3868 intf->channels[0].address,
3869 intf->channels[0].lun,
3870 0, 1); /* Don't retry, and don't wait. */
3873 #ifdef CONFIG_IPMI_PANIC_STRING
3874 /* On every interface, dump a bunch of OEM event holding the
3879 /* For every registered interface, send the event. */
3880 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3882 struct ipmi_ipmb_addr *ipmb;
3885 if (intf->intf_num == -1)
3886 /* Interface was not ready yet. */
3890 * intf_num is used as an marker to tell if the
3891 * interface is valid. Thus we need a read barrier to
3892 * make sure data fetched before checking intf_num
3897 /* First job here is to figure out where to send the
3898 OEM events. There's no way in IPMI to send OEM
3899 events using an event send command, so we have to
3900 find the SEL to put them in and stick them in
3903 /* Get capabilities from the get device id. */
3904 intf->local_sel_device = 0;
3905 intf->local_event_generator = 0;
3906 intf->event_receiver = 0;
3908 /* Request the device info from the local MC. */
3909 msg.netfn = IPMI_NETFN_APP_REQUEST;
3910 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* device_id_fetcher captures the synchronous response. */
3913 intf->null_user_handler = device_id_fetcher;
3914 i_ipmi_request(NULL,
3923 intf->channels[0].address,
3924 intf->channels[0].lun,
3925 0, 1); /* Don't retry, and don't wait. */
3927 if (intf->local_event_generator) {
3928 /* Request the event receiver from the local MC. */
3929 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3930 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3933 intf->null_user_handler = event_receiver_fetcher;
3934 i_ipmi_request(NULL,
3943 intf->channels[0].address,
3944 intf->channels[0].lun,
3945 0, 1); /* no retry, and no wait. */
3947 intf->null_user_handler = NULL;
3949 /* Validate the event receiver. The low bit must not
3950 be 1 (it must be a valid IPMB address), it cannot
3951 be zero, and it must not be my address. */
3952 if (((intf->event_receiver & 1) == 0)
3953 && (intf->event_receiver != 0)
3954 && (intf->event_receiver != intf->channels[0].address))
3956 /* The event receiver is valid, send an IPMB
3958 ipmb = (struct ipmi_ipmb_addr *) &addr;
3959 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3960 ipmb->channel = 0; /* FIXME - is this right? */
3961 ipmb->lun = intf->event_receiver_lun;
3962 ipmb->slave_addr = intf->event_receiver;
3963 } else if (intf->local_sel_device) {
3964 /* The event receiver was not valid (or was
3965 me), but I am an SEL device, just dump it
3967 si = (struct ipmi_system_interface_addr *) &addr;
3968 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3969 si->channel = IPMI_BMC_CHANNEL;
3972 continue; /* No where to send the event. */
3975 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3976 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
/* Loop over the panic string, 11 bytes per OEM SEL record. */
3982 int size = strlen(p);
3988 data[2] = 0xf0; /* OEM event without timestamp. */
3989 data[3] = intf->channels[0].address;
3990 data[4] = j++; /* sequence # */
3991 /* Always give 11 bytes, so strncpy will fill
3992 it with zeroes for me. */
3993 strncpy(data+5, p, 11);
3996 i_ipmi_request(NULL,
4005 intf->channels[0].address,
4006 intf->channels[0].lun,
4007 0, 1); /* no retry, and no wait. */
4010 #endif /* CONFIG_IPMI_PANIC_STRING */
4012 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Guards against running the panic path more than once. */
4014 static int has_panicked;
/*
 * Panic-notifier callback: switch every ready interface to
 * run-to-completion (polled, lock-free) mode, then optionally log
 * panic events to the BMC.
 */
4016 static int panic_event(struct notifier_block *this,
4017 unsigned long event,
4026 /* For every registered interface, set it to run to completion. */
4027 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4028 if (!intf->handlers)
4029 /* Interface is not ready. */
4032 intf->run_to_completion = 1;
4033 intf->handlers->set_run_to_completion(intf->send_info, 1);
4036 #ifdef CONFIG_IPMI_PANIC_EVENT
4037 send_panic_events(ptr);
/* Registered on panic_notifier_list at init, removed at cleanup. */
4043 static struct notifier_block panic_block = {
4044 .notifier_call = panic_event,
4046 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time initialization of the message handler: register the IPMI
 * device driver, create /proc/ipmi, start the periodic timer, and
 * hook the panic notifier.
 */
4049 static int ipmi_init_msghandler(void)
4056 rv = driver_register(&ipmidriver);
4058 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4062 printk(KERN_INFO "ipmi message handler version "
4063 IPMI_DRIVER_VERSION "\n");
4065 #ifdef CONFIG_PROC_FS
4066 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4067 if (!proc_ipmi_root) {
4068 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4072 proc_ipmi_root->owner = THIS_MODULE;
4073 #endif /* CONFIG_PROC_FS */
4075 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4076 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4078 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: thin wrapper that delegates to
   ipmi_init_msghandler(), which is kept separate so it can also be
   invoked from elsewhere in the file (see its forward declaration at
   the top). */
4085 static __init int ipmi_init_msghandler_mod(void)
4087 ipmi_init_msghandler();
/* Module teardown (fragmentary listing; interior lines are missing):
   unhooks the panic notifier, stops the periodic timer, removes
   /proc/ipmi, unregisters the driver, and warns about any message
   buffers still outstanding. */
4091 static __exit void cleanup_ipmi(void)
4098 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4100 /* This can't be called if any interfaces exist, so no worry about
4101 shutting down the interfaces. */
4103 /* Tell the timer to stop, then wait for it to stop. This avoids
4104 problems with race conditions removing the timer here. */
4105 atomic_inc(&stop_operation);
/* del_timer_sync waits for a concurrently-running timer handler to
   finish before returning. */
4106 del_timer_sync(&ipmi_timer);
4108 #ifdef CONFIG_PROC_FS
4109 remove_proc_entry(proc_ipmi_root->name, NULL);
4110 #endif /* CONFIG_PROC_FS */
4112 driver_unregister(&ipmidriver);
/* Non-zero in-use counts here mean SMI or receive message buffers
   were leaked somewhere -- report them rather than silently exiting. */
4116 /* Check for buffer leaks. */
4117 count = atomic_read(&smi_msg_inuse_count);
4119 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4121 count = atomic_read(&recv_msg_inuse_count);
4123 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module entry/exit hookup and metadata. */
4126 module_exit(cleanup_ipmi);
4128 module_init(ipmi_init_msghandler_mod);
4129 MODULE_LICENSE("GPL");
4130 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4131 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4132 MODULE_VERSION(IPMI_DRIVER_VERSION);

/* Public API of the message handler, exported for the other IPMI
   modules: user-facing message/addressing calls, SMI (system
   interface) registration and callbacks, watcher registration, and
   the proc/buffer helpers. */
4134 EXPORT_SYMBOL(ipmi_create_user);
4135 EXPORT_SYMBOL(ipmi_destroy_user);
4136 EXPORT_SYMBOL(ipmi_get_version);
4137 EXPORT_SYMBOL(ipmi_request_settime);
4138 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4139 EXPORT_SYMBOL(ipmi_poll_interface);
4140 EXPORT_SYMBOL(ipmi_register_smi);
4141 EXPORT_SYMBOL(ipmi_unregister_smi);
4142 EXPORT_SYMBOL(ipmi_register_for_cmd);
4143 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4144 EXPORT_SYMBOL(ipmi_smi_msg_received);
4145 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4146 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4147 EXPORT_SYMBOL(ipmi_addr_length);
4148 EXPORT_SYMBOL(ipmi_validate_addr);
4149 EXPORT_SYMBOL(ipmi_set_gets_events);
4150 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4151 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4152 EXPORT_SYMBOL(ipmi_set_my_address);
4153 EXPORT_SYMBOL(ipmi_get_my_address);
4154 EXPORT_SYMBOL(ipmi_set_my_LUN);
4155 EXPORT_SYMBOL(ipmi_get_my_LUN);
4156 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4157 EXPORT_SYMBOL(ipmi_free_recv_msg);