4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
48 #define PFX "IPMI message handler: "
50 #define IPMI_DRIVER_VERSION "39.1"
52 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53 static int ipmi_init_msghandler(void);
55 static int initialized;
58 static struct proc_dir_entry *proc_ipmi_root;
59 #endif /* CONFIG_PROC_FS */
61 /* Remain in auto-maintenance mode for this amount of time (in ms). */
62 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
64 #define MAX_EVENTS_IN_QUEUE 25
66 /* Don't let a message sit in a queue forever, always time it with at lest
67 the max message timer. This is in milliseconds. */
68 #define MAX_MSG_TIMEOUT 60000
72 * The main "user" data structure.
76 struct list_head link;
78 /* Set to "0" when the user is destroyed. */
83 /* The upper layer that handles receive messages. */
84 struct ipmi_user_hndl *handler;
87 /* The interface this user is bound to. */
90 /* Does this interface receive IPMI events? */
96 struct list_head link;
104 * This is used to form a linked lised during mass deletion.
105 * Since this is in an RCU list, we cannot use the link above
106 * or change any data until the RCU period completes. So we
107 * use this next variable during mass deletion so we can have
108 * a list and don't have to wait and restart the search on
109 * every individual deletion of a command. */
110 struct cmd_rcvr *next;
115 unsigned int inuse : 1;
116 unsigned int broadcast : 1;
118 unsigned long timeout;
119 unsigned long orig_timeout;
120 unsigned int retries_left;
122 /* To verify on an incoming send message response that this is
123 the message that the response is for, we keep a sequence id
124 and increment it every time we send a message. */
127 /* This is held so we can properly respond to the message on a
128 timeout, and it is used to hold the temporary data for
129 retransmission, too. */
130 struct ipmi_recv_msg *recv_msg;
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  Layout: 6 bits of sequence
   table index (IPMI_IPMB_NUM_SEQ == 64 slots) in bits 26-31 and a
   26-bit generation counter ("seqid") in bits 0-25.  The masks used
   by STORE, GET and NEXT must agree, or a stored msgid whose seqid
   has bits 22-25 set could never be matched back to its entry. */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((long)(seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
147 unsigned char medium;
148 unsigned char protocol;
150 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
151 but may be changed by the user. */
152 unsigned char address;
154 /* My LUN. This should generally stay the SMS LUN, but just in
159 #ifdef CONFIG_PROC_FS
160 struct ipmi_proc_entry
163 struct ipmi_proc_entry *next;
169 struct platform_device *dev;
170 struct ipmi_device_id id;
171 unsigned char guid[16];
174 struct kref refcount;
176 /* bmc device attributes */
177 struct device_attribute device_id_attr;
178 struct device_attribute provides_dev_sdrs_attr;
179 struct device_attribute revision_attr;
180 struct device_attribute firmware_rev_attr;
181 struct device_attribute version_attr;
182 struct device_attribute add_dev_support_attr;
183 struct device_attribute manufacturer_id_attr;
184 struct device_attribute product_id_attr;
185 struct device_attribute guid_attr;
186 struct device_attribute aux_firmware_rev_attr;
189 #define IPMI_IPMB_NUM_SEQ 64
190 #define IPMI_MAX_CHANNELS 16
193 /* What interface number are we? */
196 struct kref refcount;
198 /* Used for a list of interfaces. */
199 struct list_head link;
201 /* The list of upper layers that are using me. seq_lock
203 struct list_head users;
205 /* Information to supply to users. */
206 unsigned char ipmi_version_major;
207 unsigned char ipmi_version_minor;
209 /* Used for wake ups at startup. */
210 wait_queue_head_t waitq;
212 struct bmc_device *bmc;
216 /* This is the lower-layer's sender routine. Note that you
217 * must either be holding the ipmi_interfaces_mutex or be in
218 * an umpreemptible region to use this. You must fetch the
219 * value into a local variable and make sure it is not NULL. */
220 struct ipmi_smi_handlers *handlers;
223 #ifdef CONFIG_PROC_FS
224 /* A list of proc entries for this interface. */
225 struct mutex proc_entry_lock;
226 struct ipmi_proc_entry *proc_entries;
229 /* Driver-model device for the system interface. */
230 struct device *si_dev;
232 /* A table of sequence numbers for this interface. We use the
233 sequence numbers for IPMB messages that go out of the
234 interface to match them up with their responses. A routine
235 is called periodically to time the items in this list. */
237 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
240 /* Messages that were delayed for some reason (out of memory,
241 for instance), will go in here to be processed later in a
242 periodic timer interrupt. */
243 spinlock_t waiting_msgs_lock;
244 struct list_head waiting_msgs;
246 /* The list of command receivers that are registered for commands
247 on this interface. */
248 struct mutex cmd_rcvrs_mutex;
249 struct list_head cmd_rcvrs;
251 /* Events that were queues because no one was there to receive
253 spinlock_t events_lock; /* For dealing with event stuff. */
254 struct list_head waiting_events;
255 unsigned int waiting_events_count; /* How many events in queue? */
256 int delivering_events;
258 /* The event receiver for my BMC, only really used at panic
259 shutdown as a place to store this. */
260 unsigned char event_receiver;
261 unsigned char event_receiver_lun;
262 unsigned char local_sel_device;
263 unsigned char local_event_generator;
265 /* For handling of maintenance mode. */
266 int maintenance_mode;
267 int maintenance_mode_enable;
268 int auto_maintenance_timeout;
269 spinlock_t maintenance_mode_lock; /* Used in a timer... */
271 /* A cheap hack, if this is non-null and a message to an
272 interface comes in with a NULL user, call this routine with
273 it. Note that the message will still be freed by the
274 caller. This only works on the system interface. */
275 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
277 /* When we are scanning the channels for an SMI, this will
278 tell which channel we are scanning. */
281 /* Channel information */
282 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
285 struct proc_dir_entry *proc_dir;
286 char proc_dir_name[10];
288 spinlock_t counter_lock; /* For making counters atomic. */
290 /* Commands we got that were invalid. */
291 unsigned int sent_invalid_commands;
293 /* Commands we sent to the MC. */
294 unsigned int sent_local_commands;
295 /* Responses from the MC that were delivered to a user. */
296 unsigned int handled_local_responses;
297 /* Responses from the MC that were not delivered to a user. */
298 unsigned int unhandled_local_responses;
300 /* Commands we sent out to the IPMB bus. */
301 unsigned int sent_ipmb_commands;
302 /* Commands sent on the IPMB that had errors on the SEND CMD */
303 unsigned int sent_ipmb_command_errs;
304 /* Each retransmit increments this count. */
305 unsigned int retransmitted_ipmb_commands;
306 /* When a message times out (runs out of retransmits) this is
308 unsigned int timed_out_ipmb_commands;
310 /* This is like above, but for broadcasts. Broadcasts are
311 *not* included in the above count (they are expected to
313 unsigned int timed_out_ipmb_broadcasts;
315 /* Responses I have sent to the IPMB bus. */
316 unsigned int sent_ipmb_responses;
318 /* The response was delivered to the user. */
319 unsigned int handled_ipmb_responses;
320 /* The response had invalid data in it. */
321 unsigned int invalid_ipmb_responses;
322 /* The response didn't have anyone waiting for it. */
323 unsigned int unhandled_ipmb_responses;
325 /* Commands we sent out to the IPMB bus. */
326 unsigned int sent_lan_commands;
327 /* Commands sent on the IPMB that had errors on the SEND CMD */
328 unsigned int sent_lan_command_errs;
329 /* Each retransmit increments this count. */
330 unsigned int retransmitted_lan_commands;
331 /* When a message times out (runs out of retransmits) this is
333 unsigned int timed_out_lan_commands;
335 /* Responses I have sent to the IPMB bus. */
336 unsigned int sent_lan_responses;
338 /* The response was delivered to the user. */
339 unsigned int handled_lan_responses;
340 /* The response had invalid data in it. */
341 unsigned int invalid_lan_responses;
342 /* The response didn't have anyone waiting for it. */
343 unsigned int unhandled_lan_responses;
345 /* The command was delivered to the user. */
346 unsigned int handled_commands;
347 /* The command had invalid data in it. */
348 unsigned int invalid_commands;
349 /* The command didn't have anyone waiting for it. */
350 unsigned int unhandled_commands;
352 /* Invalid data in an event. */
353 unsigned int invalid_events;
354 /* Events that were received with the proper format. */
357 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
360 * The driver model view of the IPMI messaging driver.
362 static struct device_driver ipmidriver = {
364 .bus = &platform_bus_type
366 static DEFINE_MUTEX(ipmidriver_mutex);
368 static LIST_HEAD(ipmi_interfaces);
369 static DEFINE_MUTEX(ipmi_interfaces_mutex);
371 /* List of watchers that want to know when smi's are added and
373 static LIST_HEAD(smi_watchers);
374 static DEFINE_MUTEX(smi_watchers_mutex);
377 static void free_recv_msg_list(struct list_head *q)
379 struct ipmi_recv_msg *msg, *msg2;
381 list_for_each_entry_safe(msg, msg2, q, link) {
382 list_del(&msg->link);
383 ipmi_free_recv_msg(msg);
387 static void free_smi_msg_list(struct list_head *q)
389 struct ipmi_smi_msg *msg, *msg2;
391 list_for_each_entry_safe(msg, msg2, q, link) {
392 list_del(&msg->link);
393 ipmi_free_smi_msg(msg);
397 static void clean_up_interface_data(ipmi_smi_t intf)
400 struct cmd_rcvr *rcvr, *rcvr2;
401 struct list_head list;
403 free_smi_msg_list(&intf->waiting_msgs);
404 free_recv_msg_list(&intf->waiting_events);
407 * Wholesale remove all the entries from the list in the
408 * interface and wait for RCU to know that none are in use.
410 mutex_lock(&intf->cmd_rcvrs_mutex);
411 INIT_LIST_HEAD(&list);
412 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
413 mutex_unlock(&intf->cmd_rcvrs_mutex);
415 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
418 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
419 if ((intf->seq_table[i].inuse)
420 && (intf->seq_table[i].recv_msg))
422 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
427 static void intf_free(struct kref *ref)
429 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
431 clean_up_interface_data(intf);
435 struct watcher_entry {
438 struct list_head link;
441 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
444 LIST_HEAD(to_deliver);
445 struct watcher_entry *e, *e2;
447 mutex_lock(&smi_watchers_mutex);
449 mutex_lock(&ipmi_interfaces_mutex);
451 /* Build a list of things to deliver. */
452 list_for_each_entry(intf, &ipmi_interfaces, link) {
453 if (intf->intf_num == -1)
455 e = kmalloc(sizeof(*e), GFP_KERNEL);
458 kref_get(&intf->refcount);
460 e->intf_num = intf->intf_num;
461 list_add_tail(&e->link, &to_deliver);
464 /* We will succeed, so add it to the list. */
465 list_add(&watcher->link, &smi_watchers);
467 mutex_unlock(&ipmi_interfaces_mutex);
469 list_for_each_entry_safe(e, e2, &to_deliver, link) {
471 watcher->new_smi(e->intf_num, e->intf->si_dev);
472 kref_put(&e->intf->refcount, intf_free);
476 mutex_unlock(&smi_watchers_mutex);
481 mutex_unlock(&ipmi_interfaces_mutex);
482 mutex_unlock(&smi_watchers_mutex);
483 list_for_each_entry_safe(e, e2, &to_deliver, link) {
485 kref_put(&e->intf->refcount, intf_free);
491 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
493 mutex_lock(&smi_watchers_mutex);
494 list_del(&(watcher->link));
495 mutex_unlock(&smi_watchers_mutex);
500 * Must be called with smi_watchers_mutex held.
503 call_smi_watchers(int i, struct device *dev)
505 struct ipmi_smi_watcher *w;
507 list_for_each_entry(w, &smi_watchers, link) {
508 if (try_module_get(w->owner)) {
510 module_put(w->owner);
516 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
518 if (addr1->addr_type != addr2->addr_type)
521 if (addr1->channel != addr2->channel)
524 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
525 struct ipmi_system_interface_addr *smi_addr1
526 = (struct ipmi_system_interface_addr *) addr1;
527 struct ipmi_system_interface_addr *smi_addr2
528 = (struct ipmi_system_interface_addr *) addr2;
529 return (smi_addr1->lun == smi_addr2->lun);
532 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
533 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
535 struct ipmi_ipmb_addr *ipmb_addr1
536 = (struct ipmi_ipmb_addr *) addr1;
537 struct ipmi_ipmb_addr *ipmb_addr2
538 = (struct ipmi_ipmb_addr *) addr2;
540 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
541 && (ipmb_addr1->lun == ipmb_addr2->lun));
544 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
545 struct ipmi_lan_addr *lan_addr1
546 = (struct ipmi_lan_addr *) addr1;
547 struct ipmi_lan_addr *lan_addr2
548 = (struct ipmi_lan_addr *) addr2;
550 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
551 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
552 && (lan_addr1->session_handle
553 == lan_addr2->session_handle)
554 && (lan_addr1->lun == lan_addr2->lun));
560 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
562 if (len < sizeof(struct ipmi_system_interface_addr)) {
566 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
567 if (addr->channel != IPMI_BMC_CHANNEL)
572 if ((addr->channel == IPMI_BMC_CHANNEL)
573 || (addr->channel >= IPMI_MAX_CHANNELS)
574 || (addr->channel < 0))
577 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
578 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
580 if (len < sizeof(struct ipmi_ipmb_addr)) {
586 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
587 if (len < sizeof(struct ipmi_lan_addr)) {
596 unsigned int ipmi_addr_length(int addr_type)
598 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
599 return sizeof(struct ipmi_system_interface_addr);
601 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
602 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
604 return sizeof(struct ipmi_ipmb_addr);
607 if (addr_type == IPMI_LAN_ADDR_TYPE)
608 return sizeof(struct ipmi_lan_addr);
613 static void deliver_response(struct ipmi_recv_msg *msg)
616 ipmi_smi_t intf = msg->user_msg_data;
619 /* Special handling for NULL users. */
620 if (intf->null_user_handler) {
621 intf->null_user_handler(intf, msg);
622 spin_lock_irqsave(&intf->counter_lock, flags);
623 intf->handled_local_responses++;
624 spin_unlock_irqrestore(&intf->counter_lock, flags);
626 /* No handler, so give up. */
627 spin_lock_irqsave(&intf->counter_lock, flags);
628 intf->unhandled_local_responses++;
629 spin_unlock_irqrestore(&intf->counter_lock, flags);
631 ipmi_free_recv_msg(msg);
633 ipmi_user_t user = msg->user;
634 user->handler->ipmi_recv_hndl(msg, user->handler_data);
639 deliver_err_response(struct ipmi_recv_msg *msg, int err)
641 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
642 msg->msg_data[0] = err;
643 msg->msg.netfn |= 1; /* Convert to a response. */
644 msg->msg.data_len = 1;
645 msg->msg.data = msg->msg_data;
646 deliver_response(msg);
649 /* Find the next sequence number not being used and add the given
650 message with the given timeout to the sequence table. This must be
651 called with the interface's seq_lock held. */
652 static int intf_next_seq(ipmi_smi_t intf,
653 struct ipmi_recv_msg *recv_msg,
654 unsigned long timeout,
663 for (i = intf->curr_seq;
664 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
665 i = (i+1)%IPMI_IPMB_NUM_SEQ)
667 if (!intf->seq_table[i].inuse)
671 if (!intf->seq_table[i].inuse) {
672 intf->seq_table[i].recv_msg = recv_msg;
674 /* Start with the maximum timeout, when the send response
675 comes in we will start the real timer. */
676 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
677 intf->seq_table[i].orig_timeout = timeout;
678 intf->seq_table[i].retries_left = retries;
679 intf->seq_table[i].broadcast = broadcast;
680 intf->seq_table[i].inuse = 1;
681 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
683 *seqid = intf->seq_table[i].seqid;
684 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
692 /* Return the receive message for the given sequence number and
693 release the sequence number so it can be reused. Some other data
694 is passed in to be sure the message matches up correctly (to help
695 guard against message coming in after their timeout and the
696 sequence number being reused). */
697 static int intf_find_seq(ipmi_smi_t intf,
702 struct ipmi_addr *addr,
703 struct ipmi_recv_msg **recv_msg)
708 if (seq >= IPMI_IPMB_NUM_SEQ)
711 spin_lock_irqsave(&(intf->seq_lock), flags);
712 if (intf->seq_table[seq].inuse) {
713 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
715 if ((msg->addr.channel == channel)
716 && (msg->msg.cmd == cmd)
717 && (msg->msg.netfn == netfn)
718 && (ipmi_addr_equal(addr, &(msg->addr))))
721 intf->seq_table[seq].inuse = 0;
725 spin_unlock_irqrestore(&(intf->seq_lock), flags);
731 /* Start the timer for a specific sequence table entry. */
732 static int intf_start_seq_timer(ipmi_smi_t intf,
741 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
743 spin_lock_irqsave(&(intf->seq_lock), flags);
744 /* We do this verification because the user can be deleted
745 while a message is outstanding. */
746 if ((intf->seq_table[seq].inuse)
747 && (intf->seq_table[seq].seqid == seqid))
749 struct seq_table *ent = &(intf->seq_table[seq]);
750 ent->timeout = ent->orig_timeout;
753 spin_unlock_irqrestore(&(intf->seq_lock), flags);
758 /* Got an error for the send message for a specific sequence number. */
759 static int intf_err_seq(ipmi_smi_t intf,
767 struct ipmi_recv_msg *msg = NULL;
770 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
772 spin_lock_irqsave(&(intf->seq_lock), flags);
773 /* We do this verification because the user can be deleted
774 while a message is outstanding. */
775 if ((intf->seq_table[seq].inuse)
776 && (intf->seq_table[seq].seqid == seqid))
778 struct seq_table *ent = &(intf->seq_table[seq]);
784 spin_unlock_irqrestore(&(intf->seq_lock), flags);
787 deliver_err_response(msg, err);
793 int ipmi_create_user(unsigned int if_num,
794 struct ipmi_user_hndl *handler,
799 ipmi_user_t new_user;
803 /* There is no module usecount here, because it's not
804 required. Since this can only be used by and called from
805 other modules, they will implicitly use this module, and
806 thus this can't be removed unless the other modules are
812 /* Make sure the driver is actually initialized, this handles
813 problems with initialization order. */
815 rv = ipmi_init_msghandler();
819 /* The init code doesn't return an error if it was turned
820 off, but it won't initialize. Check that. */
825 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
829 mutex_lock(&ipmi_interfaces_mutex);
830 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
831 if (intf->intf_num == if_num)
834 /* Not found, return an error */
839 /* Note that each existing user holds a refcount to the interface. */
840 kref_get(&intf->refcount);
842 kref_init(&new_user->refcount);
843 new_user->handler = handler;
844 new_user->handler_data = handler_data;
845 new_user->intf = intf;
846 new_user->gets_events = 0;
848 if (!try_module_get(intf->handlers->owner)) {
853 if (intf->handlers->inc_usecount) {
854 rv = intf->handlers->inc_usecount(intf->send_info);
856 module_put(intf->handlers->owner);
861 /* Hold the lock so intf->handlers is guaranteed to be good
863 mutex_unlock(&ipmi_interfaces_mutex);
866 spin_lock_irqsave(&intf->seq_lock, flags);
867 list_add_rcu(&new_user->link, &intf->users);
868 spin_unlock_irqrestore(&intf->seq_lock, flags);
873 kref_put(&intf->refcount, intf_free);
875 mutex_unlock(&ipmi_interfaces_mutex);
880 static void free_user(struct kref *ref)
882 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
886 int ipmi_destroy_user(ipmi_user_t user)
888 ipmi_smi_t intf = user->intf;
891 struct cmd_rcvr *rcvr;
892 struct cmd_rcvr *rcvrs = NULL;
896 /* Remove the user from the interface's sequence table. */
897 spin_lock_irqsave(&intf->seq_lock, flags);
898 list_del_rcu(&user->link);
900 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
901 if (intf->seq_table[i].inuse
902 && (intf->seq_table[i].recv_msg->user == user))
904 intf->seq_table[i].inuse = 0;
905 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
908 spin_unlock_irqrestore(&intf->seq_lock, flags);
911 * Remove the user from the command receiver's table. First
912 * we build a list of everything (not using the standard link,
913 * since other things may be using it till we do
914 * synchronize_rcu()) then free everything in that list.
916 mutex_lock(&intf->cmd_rcvrs_mutex);
917 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
918 if (rcvr->user == user) {
919 list_del_rcu(&rcvr->link);
924 mutex_unlock(&intf->cmd_rcvrs_mutex);
932 mutex_lock(&ipmi_interfaces_mutex);
933 if (intf->handlers) {
934 module_put(intf->handlers->owner);
935 if (intf->handlers->dec_usecount)
936 intf->handlers->dec_usecount(intf->send_info);
938 mutex_unlock(&ipmi_interfaces_mutex);
940 kref_put(&intf->refcount, intf_free);
942 kref_put(&user->refcount, free_user);
947 void ipmi_get_version(ipmi_user_t user,
948 unsigned char *major,
949 unsigned char *minor)
951 *major = user->intf->ipmi_version_major;
952 *minor = user->intf->ipmi_version_minor;
955 int ipmi_set_my_address(ipmi_user_t user,
956 unsigned int channel,
957 unsigned char address)
959 if (channel >= IPMI_MAX_CHANNELS)
961 user->intf->channels[channel].address = address;
965 int ipmi_get_my_address(ipmi_user_t user,
966 unsigned int channel,
967 unsigned char *address)
969 if (channel >= IPMI_MAX_CHANNELS)
971 *address = user->intf->channels[channel].address;
975 int ipmi_set_my_LUN(ipmi_user_t user,
976 unsigned int channel,
979 if (channel >= IPMI_MAX_CHANNELS)
981 user->intf->channels[channel].lun = LUN & 0x3;
985 int ipmi_get_my_LUN(ipmi_user_t user,
986 unsigned int channel,
987 unsigned char *address)
989 if (channel >= IPMI_MAX_CHANNELS)
991 *address = user->intf->channels[channel].lun;
995 int ipmi_get_maintenance_mode(ipmi_user_t user)
1000 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1001 mode = user->intf->maintenance_mode;
1002 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1006 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1008 static void maintenance_mode_update(ipmi_smi_t intf)
1010 if (intf->handlers->set_maintenance_mode)
1011 intf->handlers->set_maintenance_mode(
1012 intf->send_info, intf->maintenance_mode_enable);
1015 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1018 unsigned long flags;
1019 ipmi_smi_t intf = user->intf;
1021 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1022 if (intf->maintenance_mode != mode) {
1024 case IPMI_MAINTENANCE_MODE_AUTO:
1025 intf->maintenance_mode = mode;
1026 intf->maintenance_mode_enable
1027 = (intf->auto_maintenance_timeout > 0);
1030 case IPMI_MAINTENANCE_MODE_OFF:
1031 intf->maintenance_mode = mode;
1032 intf->maintenance_mode_enable = 0;
1035 case IPMI_MAINTENANCE_MODE_ON:
1036 intf->maintenance_mode = mode;
1037 intf->maintenance_mode_enable = 1;
1045 maintenance_mode_update(intf);
1048 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1052 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1054 int ipmi_set_gets_events(ipmi_user_t user, int val)
1056 unsigned long flags;
1057 ipmi_smi_t intf = user->intf;
1058 struct ipmi_recv_msg *msg, *msg2;
1059 struct list_head msgs;
1061 INIT_LIST_HEAD(&msgs);
1063 spin_lock_irqsave(&intf->events_lock, flags);
1064 user->gets_events = val;
1066 if (intf->delivering_events)
1068 * Another thread is delivering events for this, so
1069 * let it handle any new events.
1073 /* Deliver any queued events. */
1074 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1075 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1076 list_move_tail(&msg->link, &msgs);
1077 intf->waiting_events_count = 0;
1079 intf->delivering_events = 1;
1080 spin_unlock_irqrestore(&intf->events_lock, flags);
1082 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1084 kref_get(&user->refcount);
1085 deliver_response(msg);
1088 spin_lock_irqsave(&intf->events_lock, flags);
1089 intf->delivering_events = 0;
1093 spin_unlock_irqrestore(&intf->events_lock, flags);
1098 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1099 unsigned char netfn,
1103 struct cmd_rcvr *rcvr;
1105 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1106 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1107 && (rcvr->chans & (1 << chan)))
1113 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1114 unsigned char netfn,
1118 struct cmd_rcvr *rcvr;
1120 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1121 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1122 && (rcvr->chans & chans))
1128 int ipmi_register_for_cmd(ipmi_user_t user,
1129 unsigned char netfn,
1133 ipmi_smi_t intf = user->intf;
1134 struct cmd_rcvr *rcvr;
1138 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1142 rcvr->netfn = netfn;
1143 rcvr->chans = chans;
1146 mutex_lock(&intf->cmd_rcvrs_mutex);
1147 /* Make sure the command/netfn is not already registered. */
1148 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1153 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1156 mutex_unlock(&intf->cmd_rcvrs_mutex);
1163 int ipmi_unregister_for_cmd(ipmi_user_t user,
1164 unsigned char netfn,
1168 ipmi_smi_t intf = user->intf;
1169 struct cmd_rcvr *rcvr;
1170 struct cmd_rcvr *rcvrs = NULL;
1171 int i, rv = -ENOENT;
1173 mutex_lock(&intf->cmd_rcvrs_mutex);
1174 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1175 if (((1 << i) & chans) == 0)
1177 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1180 if (rcvr->user == user) {
1182 rcvr->chans &= ~chans;
1183 if (rcvr->chans == 0) {
1184 list_del_rcu(&rcvr->link);
1190 mutex_unlock(&intf->cmd_rcvrs_mutex);
/*
 * IPMB 2's-complement checksum over 'size' bytes at 'data': the
 * returned byte, added to the sum of the data, yields 0 (mod 256).
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1211 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1212 struct kernel_ipmi_msg *msg,
1213 struct ipmi_ipmb_addr *ipmb_addr,
1215 unsigned char ipmb_seq,
1217 unsigned char source_address,
1218 unsigned char source_lun)
1222 /* Format the IPMB header data. */
1223 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1224 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1225 smi_msg->data[2] = ipmb_addr->channel;
1227 smi_msg->data[3] = 0;
1228 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1229 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1230 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1231 smi_msg->data[i+6] = source_address;
1232 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1233 smi_msg->data[i+8] = msg->cmd;
1235 /* Now tack on the data to the message. */
1236 if (msg->data_len > 0)
1237 memcpy(&(smi_msg->data[i+9]), msg->data,
1239 smi_msg->data_size = msg->data_len + 9;
1241 /* Now calculate the checksum and tack it on. */
1242 smi_msg->data[i+smi_msg->data_size]
1243 = ipmb_checksum(&(smi_msg->data[i+6]),
1244 smi_msg->data_size-6);
1246 /* Add on the checksum size and the offset from the
1248 smi_msg->data_size += 1 + i;
1250 smi_msg->msgid = msgid;
1253 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1254 struct kernel_ipmi_msg *msg,
1255 struct ipmi_lan_addr *lan_addr,
1257 unsigned char ipmb_seq,
1258 unsigned char source_lun)
1260 /* Format the IPMB header data. */
1261 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1262 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1263 smi_msg->data[2] = lan_addr->channel;
1264 smi_msg->data[3] = lan_addr->session_handle;
1265 smi_msg->data[4] = lan_addr->remote_SWID;
1266 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1267 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1268 smi_msg->data[7] = lan_addr->local_SWID;
1269 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1270 smi_msg->data[9] = msg->cmd;
1272 /* Now tack on the data to the message. */
1273 if (msg->data_len > 0)
1274 memcpy(&(smi_msg->data[10]), msg->data,
1276 smi_msg->data_size = msg->data_len + 10;
1278 /* Now calculate the checksum and tack it on. */
1279 smi_msg->data[smi_msg->data_size]
1280 = ipmb_checksum(&(smi_msg->data[7]),
1281 smi_msg->data_size-7);
1283 /* Add on the checksum size and the offset from the
1285 smi_msg->data_size += 1;
1287 smi_msg->msgid = msgid;
1290 /* Separate from ipmi_request so that the user does not have to be
1291 supplied in certain circumstances (mainly at panic time). If
1292 messages are supplied, they will be freed, even if an error
1294 static int i_ipmi_request(ipmi_user_t user,
1296 struct ipmi_addr *addr,
1298 struct kernel_ipmi_msg *msg,
1299 void *user_msg_data,
1301 struct ipmi_recv_msg *supplied_recv,
1303 unsigned char source_address,
1304 unsigned char source_lun,
1306 unsigned int retry_time_ms)
1309 struct ipmi_smi_msg *smi_msg;
1310 struct ipmi_recv_msg *recv_msg;
1311 unsigned long flags;
1312 struct ipmi_smi_handlers *handlers;
1315 if (supplied_recv) {
1316 recv_msg = supplied_recv;
1318 recv_msg = ipmi_alloc_recv_msg();
1319 if (recv_msg == NULL) {
1323 recv_msg->user_msg_data = user_msg_data;
1326 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1328 smi_msg = ipmi_alloc_smi_msg();
1329 if (smi_msg == NULL) {
1330 ipmi_free_recv_msg(recv_msg);
1336 handlers = intf->handlers;
1342 recv_msg->user = user;
1344 kref_get(&user->refcount);
1345 recv_msg->msgid = msgid;
1346 /* Store the message to send in the receive message so timeout
1347 responses can get the proper response data. */
1348 recv_msg->msg = *msg;
1350 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1351 struct ipmi_system_interface_addr *smi_addr;
1353 if (msg->netfn & 1) {
1354 /* Responses are not allowed to the SMI. */
1359 smi_addr = (struct ipmi_system_interface_addr *) addr;
1360 if (smi_addr->lun > 3) {
1361 spin_lock_irqsave(&intf->counter_lock, flags);
1362 intf->sent_invalid_commands++;
1363 spin_unlock_irqrestore(&intf->counter_lock, flags);
1368 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1370 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1371 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1372 || (msg->cmd == IPMI_GET_MSG_CMD)
1373 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1375 /* We don't let the user do these, since we manage
1376 the sequence numbers. */
1377 spin_lock_irqsave(&intf->counter_lock, flags);
1378 intf->sent_invalid_commands++;
1379 spin_unlock_irqrestore(&intf->counter_lock, flags);
1384 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1385 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1386 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1387 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1389 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1390 intf->auto_maintenance_timeout
1391 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1392 if (!intf->maintenance_mode
1393 && !intf->maintenance_mode_enable)
1395 intf->maintenance_mode_enable = 1;
1396 maintenance_mode_update(intf);
1398 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1402 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1403 spin_lock_irqsave(&intf->counter_lock, flags);
1404 intf->sent_invalid_commands++;
1405 spin_unlock_irqrestore(&intf->counter_lock, flags);
1410 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1411 smi_msg->data[1] = msg->cmd;
1412 smi_msg->msgid = msgid;
1413 smi_msg->user_data = recv_msg;
1414 if (msg->data_len > 0)
1415 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1416 smi_msg->data_size = msg->data_len + 2;
1417 spin_lock_irqsave(&intf->counter_lock, flags);
1418 intf->sent_local_commands++;
1419 spin_unlock_irqrestore(&intf->counter_lock, flags);
1420 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1421 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1423 struct ipmi_ipmb_addr *ipmb_addr;
1424 unsigned char ipmb_seq;
1428 if (addr->channel >= IPMI_MAX_CHANNELS) {
1429 spin_lock_irqsave(&intf->counter_lock, flags);
1430 intf->sent_invalid_commands++;
1431 spin_unlock_irqrestore(&intf->counter_lock, flags);
1436 if (intf->channels[addr->channel].medium
1437 != IPMI_CHANNEL_MEDIUM_IPMB)
1439 spin_lock_irqsave(&intf->counter_lock, flags);
1440 intf->sent_invalid_commands++;
1441 spin_unlock_irqrestore(&intf->counter_lock, flags);
1447 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1448 retries = 0; /* Don't retry broadcasts. */
1452 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1453 /* Broadcasts add a zero at the beginning of the
1454 message, but otherwise is the same as an IPMB
1456 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1461 /* Default to 1 second retries. */
1462 if (retry_time_ms == 0)
1463 retry_time_ms = 1000;
1465 /* 9 for the header and 1 for the checksum, plus
1466 possibly one for the broadcast. */
1467 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1468 spin_lock_irqsave(&intf->counter_lock, flags);
1469 intf->sent_invalid_commands++;
1470 spin_unlock_irqrestore(&intf->counter_lock, flags);
1475 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1476 if (ipmb_addr->lun > 3) {
1477 spin_lock_irqsave(&intf->counter_lock, flags);
1478 intf->sent_invalid_commands++;
1479 spin_unlock_irqrestore(&intf->counter_lock, flags);
1484 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1486 if (recv_msg->msg.netfn & 0x1) {
1487 /* It's a response, so use the user's sequence
1489 spin_lock_irqsave(&intf->counter_lock, flags);
1490 intf->sent_ipmb_responses++;
1491 spin_unlock_irqrestore(&intf->counter_lock, flags);
1492 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1494 source_address, source_lun);
1496 /* Save the receive message so we can use it
1497 to deliver the response. */
1498 smi_msg->user_data = recv_msg;
1500 /* It's a command, so get a sequence for it. */
1502 spin_lock_irqsave(&(intf->seq_lock), flags);
1504 spin_lock(&intf->counter_lock);
1505 intf->sent_ipmb_commands++;
1506 spin_unlock(&intf->counter_lock);
1508 /* Create a sequence number with a 1 second
1509 timeout and 4 retries. */
1510 rv = intf_next_seq(intf,
1518 /* We have used up all the sequence numbers,
1519 probably, so abort. */
1520 spin_unlock_irqrestore(&(intf->seq_lock),
1525 /* Store the sequence number in the message,
1526 so that when the send message response
1527 comes back we can start the timer. */
1528 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1529 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1530 ipmb_seq, broadcast,
1531 source_address, source_lun);
1533 /* Copy the message into the recv message data, so we
1534 can retransmit it later if necessary. */
1535 memcpy(recv_msg->msg_data, smi_msg->data,
1536 smi_msg->data_size);
1537 recv_msg->msg.data = recv_msg->msg_data;
1538 recv_msg->msg.data_len = smi_msg->data_size;
1540 /* We don't unlock until here, because we need
1541 to copy the completed message into the
1542 recv_msg before we release the lock.
1543 Otherwise, race conditions may bite us. I
1544 know that's pretty paranoid, but I prefer
1546 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1548 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1549 struct ipmi_lan_addr *lan_addr;
1550 unsigned char ipmb_seq;
1553 if (addr->channel >= IPMI_MAX_CHANNELS) {
1554 spin_lock_irqsave(&intf->counter_lock, flags);
1555 intf->sent_invalid_commands++;
1556 spin_unlock_irqrestore(&intf->counter_lock, flags);
1561 if ((intf->channels[addr->channel].medium
1562 != IPMI_CHANNEL_MEDIUM_8023LAN)
1563 && (intf->channels[addr->channel].medium
1564 != IPMI_CHANNEL_MEDIUM_ASYNC))
1566 spin_lock_irqsave(&intf->counter_lock, flags);
1567 intf->sent_invalid_commands++;
1568 spin_unlock_irqrestore(&intf->counter_lock, flags);
1575 /* Default to 1 second retries. */
1576 if (retry_time_ms == 0)
1577 retry_time_ms = 1000;
1579 /* 11 for the header and 1 for the checksum. */
1580 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1581 spin_lock_irqsave(&intf->counter_lock, flags);
1582 intf->sent_invalid_commands++;
1583 spin_unlock_irqrestore(&intf->counter_lock, flags);
1588 lan_addr = (struct ipmi_lan_addr *) addr;
1589 if (lan_addr->lun > 3) {
1590 spin_lock_irqsave(&intf->counter_lock, flags);
1591 intf->sent_invalid_commands++;
1592 spin_unlock_irqrestore(&intf->counter_lock, flags);
1597 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1599 if (recv_msg->msg.netfn & 0x1) {
1600 /* It's a response, so use the user's sequence
1602 spin_lock_irqsave(&intf->counter_lock, flags);
1603 intf->sent_lan_responses++;
1604 spin_unlock_irqrestore(&intf->counter_lock, flags);
1605 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1608 /* Save the receive message so we can use it
1609 to deliver the response. */
1610 smi_msg->user_data = recv_msg;
1612 /* It's a command, so get a sequence for it. */
1614 spin_lock_irqsave(&(intf->seq_lock), flags);
1616 spin_lock(&intf->counter_lock);
1617 intf->sent_lan_commands++;
1618 spin_unlock(&intf->counter_lock);
1620 /* Create a sequence number with a 1 second
1621 timeout and 4 retries. */
1622 rv = intf_next_seq(intf,
1630 /* We have used up all the sequence numbers,
1631 probably, so abort. */
1632 spin_unlock_irqrestore(&(intf->seq_lock),
1637 /* Store the sequence number in the message,
1638 so that when the send message response
1639 comes back we can start the timer. */
1640 format_lan_msg(smi_msg, msg, lan_addr,
1641 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1642 ipmb_seq, source_lun);
1644 /* Copy the message into the recv message data, so we
1645 can retransmit it later if necessary. */
1646 memcpy(recv_msg->msg_data, smi_msg->data,
1647 smi_msg->data_size);
1648 recv_msg->msg.data = recv_msg->msg_data;
1649 recv_msg->msg.data_len = smi_msg->data_size;
1651 /* We don't unlock until here, because we need
1652 to copy the completed message into the
1653 recv_msg before we release the lock.
1654 Otherwise, race conditions may bite us. I
1655 know that's pretty paranoid, but I prefer
1657 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1660 /* Unknown address type. */
1661 spin_lock_irqsave(&intf->counter_lock, flags);
1662 intf->sent_invalid_commands++;
1663 spin_unlock_irqrestore(&intf->counter_lock, flags);
1671 for (m = 0; m < smi_msg->data_size; m++)
1672 printk(" %2.2x", smi_msg->data[m]);
1677 handlers->sender(intf->send_info, smi_msg, priority);
1684 ipmi_free_smi_msg(smi_msg);
1685 ipmi_free_recv_msg(recv_msg);
1689 static int check_addr(ipmi_smi_t intf,
1690 struct ipmi_addr *addr,
1691 unsigned char *saddr,
1694 if (addr->channel >= IPMI_MAX_CHANNELS)
1696 *lun = intf->channels[addr->channel].lun;
1697 *saddr = intf->channels[addr->channel].address;
1701 int ipmi_request_settime(ipmi_user_t user,
1702 struct ipmi_addr *addr,
1704 struct kernel_ipmi_msg *msg,
1705 void *user_msg_data,
1708 unsigned int retry_time_ms)
1710 unsigned char saddr, lun;
1715 rv = check_addr(user->intf, addr, &saddr, &lun);
1718 return i_ipmi_request(user,
1732 int ipmi_request_supply_msgs(ipmi_user_t user,
1733 struct ipmi_addr *addr,
1735 struct kernel_ipmi_msg *msg,
1736 void *user_msg_data,
1738 struct ipmi_recv_msg *supplied_recv,
1741 unsigned char saddr, lun;
1746 rv = check_addr(user->intf, addr, &saddr, &lun);
1749 return i_ipmi_request(user,
1763 #ifdef CONFIG_PROC_FS
1764 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1765 int count, int *eof, void *data)
1767 char *out = (char *) page;
1768 ipmi_smi_t intf = data;
1772 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1773 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1774 out[rv-1] = '\n'; /* Replace the final space with a newline */
1780 static int version_file_read_proc(char *page, char **start, off_t off,
1781 int count, int *eof, void *data)
1783 char *out = (char *) page;
1784 ipmi_smi_t intf = data;
1786 return sprintf(out, "%d.%d\n",
1787 ipmi_version_major(&intf->bmc->id),
1788 ipmi_version_minor(&intf->bmc->id));
1791 static int stat_file_read_proc(char *page, char **start, off_t off,
1792 int count, int *eof, void *data)
1794 char *out = (char *) page;
1795 ipmi_smi_t intf = data;
1797 out += sprintf(out, "sent_invalid_commands: %d\n",
1798 intf->sent_invalid_commands);
1799 out += sprintf(out, "sent_local_commands: %d\n",
1800 intf->sent_local_commands);
1801 out += sprintf(out, "handled_local_responses: %d\n",
1802 intf->handled_local_responses);
1803 out += sprintf(out, "unhandled_local_responses: %d\n",
1804 intf->unhandled_local_responses);
1805 out += sprintf(out, "sent_ipmb_commands: %d\n",
1806 intf->sent_ipmb_commands);
1807 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1808 intf->sent_ipmb_command_errs);
1809 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1810 intf->retransmitted_ipmb_commands);
1811 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1812 intf->timed_out_ipmb_commands);
1813 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1814 intf->timed_out_ipmb_broadcasts);
1815 out += sprintf(out, "sent_ipmb_responses: %d\n",
1816 intf->sent_ipmb_responses);
1817 out += sprintf(out, "handled_ipmb_responses: %d\n",
1818 intf->handled_ipmb_responses);
1819 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1820 intf->invalid_ipmb_responses);
1821 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1822 intf->unhandled_ipmb_responses);
1823 out += sprintf(out, "sent_lan_commands: %d\n",
1824 intf->sent_lan_commands);
1825 out += sprintf(out, "sent_lan_command_errs: %d\n",
1826 intf->sent_lan_command_errs);
1827 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1828 intf->retransmitted_lan_commands);
1829 out += sprintf(out, "timed_out_lan_commands: %d\n",
1830 intf->timed_out_lan_commands);
1831 out += sprintf(out, "sent_lan_responses: %d\n",
1832 intf->sent_lan_responses);
1833 out += sprintf(out, "handled_lan_responses: %d\n",
1834 intf->handled_lan_responses);
1835 out += sprintf(out, "invalid_lan_responses: %d\n",
1836 intf->invalid_lan_responses);
1837 out += sprintf(out, "unhandled_lan_responses: %d\n",
1838 intf->unhandled_lan_responses);
1839 out += sprintf(out, "handled_commands: %d\n",
1840 intf->handled_commands);
1841 out += sprintf(out, "invalid_commands: %d\n",
1842 intf->invalid_commands);
1843 out += sprintf(out, "unhandled_commands: %d\n",
1844 intf->unhandled_commands);
1845 out += sprintf(out, "invalid_events: %d\n",
1846 intf->invalid_events);
1847 out += sprintf(out, "events: %d\n",
1850 return (out - ((char *) page));
1852 #endif /* CONFIG_PROC_FS */
1854 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1855 read_proc_t *read_proc, write_proc_t *write_proc,
1856 void *data, struct module *owner)
1859 #ifdef CONFIG_PROC_FS
1860 struct proc_dir_entry *file;
1861 struct ipmi_proc_entry *entry;
1863 /* Create a list element. */
1864 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1867 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1872 strcpy(entry->name, name);
1874 file = create_proc_entry(name, 0, smi->proc_dir);
1881 file->read_proc = read_proc;
1882 file->write_proc = write_proc;
1883 file->owner = owner;
1885 mutex_lock(&smi->proc_entry_lock);
1886 /* Stick it on the list. */
1887 entry->next = smi->proc_entries;
1888 smi->proc_entries = entry;
1889 mutex_unlock(&smi->proc_entry_lock);
1891 #endif /* CONFIG_PROC_FS */
1896 static int add_proc_entries(ipmi_smi_t smi, int num)
1900 #ifdef CONFIG_PROC_FS
1901 sprintf(smi->proc_dir_name, "%d", num);
1902 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1906 smi->proc_dir->owner = THIS_MODULE;
1910 rv = ipmi_smi_add_proc_entry(smi, "stats",
1911 stat_file_read_proc, NULL,
1915 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1916 ipmb_file_read_proc, NULL,
1920 rv = ipmi_smi_add_proc_entry(smi, "version",
1921 version_file_read_proc, NULL,
1923 #endif /* CONFIG_PROC_FS */
1928 static void remove_proc_entries(ipmi_smi_t smi)
1930 #ifdef CONFIG_PROC_FS
1931 struct ipmi_proc_entry *entry;
1933 mutex_lock(&smi->proc_entry_lock);
1934 while (smi->proc_entries) {
1935 entry = smi->proc_entries;
1936 smi->proc_entries = entry->next;
1938 remove_proc_entry(entry->name, smi->proc_dir);
1942 mutex_unlock(&smi->proc_entry_lock);
1943 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1944 #endif /* CONFIG_PROC_FS */
1947 static int __find_bmc_guid(struct device *dev, void *data)
1949 unsigned char *id = data;
1950 struct bmc_device *bmc = dev_get_drvdata(dev);
1951 return memcmp(bmc->guid, id, 16) == 0;
1954 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1955 unsigned char *guid)
1959 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1961 return dev_get_drvdata(dev);
1966 struct prod_dev_id {
1967 unsigned int product_id;
1968 unsigned char device_id;
1971 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1973 struct prod_dev_id *id = data;
1974 struct bmc_device *bmc = dev_get_drvdata(dev);
1976 return (bmc->id.product_id == id->product_id
1977 && bmc->id.device_id == id->device_id);
1980 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1981 struct device_driver *drv,
1982 unsigned int product_id, unsigned char device_id)
1984 struct prod_dev_id id = {
1985 .product_id = product_id,
1986 .device_id = device_id,
1990 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1992 return dev_get_drvdata(dev);
1997 static ssize_t device_id_show(struct device *dev,
1998 struct device_attribute *attr,
2001 struct bmc_device *bmc = dev_get_drvdata(dev);
2003 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2006 static ssize_t provides_dev_sdrs_show(struct device *dev,
2007 struct device_attribute *attr,
2010 struct bmc_device *bmc = dev_get_drvdata(dev);
2012 return snprintf(buf, 10, "%u\n",
2013 (bmc->id.device_revision & 0x80) >> 7);
2016 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2019 struct bmc_device *bmc = dev_get_drvdata(dev);
2021 return snprintf(buf, 20, "%u\n",
2022 bmc->id.device_revision & 0x0F);
2025 static ssize_t firmware_rev_show(struct device *dev,
2026 struct device_attribute *attr,
2029 struct bmc_device *bmc = dev_get_drvdata(dev);
2031 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2032 bmc->id.firmware_revision_2);
2035 static ssize_t ipmi_version_show(struct device *dev,
2036 struct device_attribute *attr,
2039 struct bmc_device *bmc = dev_get_drvdata(dev);
2041 return snprintf(buf, 20, "%u.%u\n",
2042 ipmi_version_major(&bmc->id),
2043 ipmi_version_minor(&bmc->id));
2046 static ssize_t add_dev_support_show(struct device *dev,
2047 struct device_attribute *attr,
2050 struct bmc_device *bmc = dev_get_drvdata(dev);
2052 return snprintf(buf, 10, "0x%02x\n",
2053 bmc->id.additional_device_support);
2056 static ssize_t manufacturer_id_show(struct device *dev,
2057 struct device_attribute *attr,
2060 struct bmc_device *bmc = dev_get_drvdata(dev);
2062 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2065 static ssize_t product_id_show(struct device *dev,
2066 struct device_attribute *attr,
2069 struct bmc_device *bmc = dev_get_drvdata(dev);
2071 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2074 static ssize_t aux_firmware_rev_show(struct device *dev,
2075 struct device_attribute *attr,
2078 struct bmc_device *bmc = dev_get_drvdata(dev);
2080 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2081 bmc->id.aux_firmware_revision[3],
2082 bmc->id.aux_firmware_revision[2],
2083 bmc->id.aux_firmware_revision[1],
2084 bmc->id.aux_firmware_revision[0]);
2087 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2090 struct bmc_device *bmc = dev_get_drvdata(dev);
2092 return snprintf(buf, 100, "%Lx%Lx\n",
2093 (long long) bmc->guid[0],
2094 (long long) bmc->guid[8]);
2097 static void remove_files(struct bmc_device *bmc)
2102 device_remove_file(&bmc->dev->dev,
2103 &bmc->device_id_attr);
2104 device_remove_file(&bmc->dev->dev,
2105 &bmc->provides_dev_sdrs_attr);
2106 device_remove_file(&bmc->dev->dev,
2107 &bmc->revision_attr);
2108 device_remove_file(&bmc->dev->dev,
2109 &bmc->firmware_rev_attr);
2110 device_remove_file(&bmc->dev->dev,
2111 &bmc->version_attr);
2112 device_remove_file(&bmc->dev->dev,
2113 &bmc->add_dev_support_attr);
2114 device_remove_file(&bmc->dev->dev,
2115 &bmc->manufacturer_id_attr);
2116 device_remove_file(&bmc->dev->dev,
2117 &bmc->product_id_attr);
2119 if (bmc->id.aux_firmware_revision_set)
2120 device_remove_file(&bmc->dev->dev,
2121 &bmc->aux_firmware_rev_attr);
2123 device_remove_file(&bmc->dev->dev,
2128 cleanup_bmc_device(struct kref *ref)
2130 struct bmc_device *bmc;
2132 bmc = container_of(ref, struct bmc_device, refcount);
2135 platform_device_unregister(bmc->dev);
2139 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2141 struct bmc_device *bmc = intf->bmc;
2143 if (intf->sysfs_name) {
2144 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2145 kfree(intf->sysfs_name);
2146 intf->sysfs_name = NULL;
2148 if (intf->my_dev_name) {
2149 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2150 kfree(intf->my_dev_name);
2151 intf->my_dev_name = NULL;
2154 mutex_lock(&ipmidriver_mutex);
2155 kref_put(&bmc->refcount, cleanup_bmc_device);
2157 mutex_unlock(&ipmidriver_mutex);
2160 static int create_files(struct bmc_device *bmc)
2164 bmc->device_id_attr.attr.name = "device_id";
2165 bmc->device_id_attr.attr.mode = S_IRUGO;
2166 bmc->device_id_attr.show = device_id_show;
2168 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2169 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2170 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2172 bmc->revision_attr.attr.name = "revision";
2173 bmc->revision_attr.attr.mode = S_IRUGO;
2174 bmc->revision_attr.show = revision_show;
2176 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2177 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2178 bmc->firmware_rev_attr.show = firmware_rev_show;
2180 bmc->version_attr.attr.name = "ipmi_version";
2181 bmc->version_attr.attr.mode = S_IRUGO;
2182 bmc->version_attr.show = ipmi_version_show;
2184 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2185 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2186 bmc->add_dev_support_attr.show = add_dev_support_show;
2188 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2189 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2190 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2192 bmc->product_id_attr.attr.name = "product_id";
2193 bmc->product_id_attr.attr.mode = S_IRUGO;
2194 bmc->product_id_attr.show = product_id_show;
2196 bmc->guid_attr.attr.name = "guid";
2197 bmc->guid_attr.attr.mode = S_IRUGO;
2198 bmc->guid_attr.show = guid_show;
2200 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2201 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2202 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2204 err = device_create_file(&bmc->dev->dev,
2205 &bmc->device_id_attr);
2207 err = device_create_file(&bmc->dev->dev,
2208 &bmc->provides_dev_sdrs_attr);
2209 if (err) goto out_devid;
2210 err = device_create_file(&bmc->dev->dev,
2211 &bmc->revision_attr);
2212 if (err) goto out_sdrs;
2213 err = device_create_file(&bmc->dev->dev,
2214 &bmc->firmware_rev_attr);
2215 if (err) goto out_rev;
2216 err = device_create_file(&bmc->dev->dev,
2217 &bmc->version_attr);
2218 if (err) goto out_firm;
2219 err = device_create_file(&bmc->dev->dev,
2220 &bmc->add_dev_support_attr);
2221 if (err) goto out_version;
2222 err = device_create_file(&bmc->dev->dev,
2223 &bmc->manufacturer_id_attr);
2224 if (err) goto out_add_dev;
2225 err = device_create_file(&bmc->dev->dev,
2226 &bmc->product_id_attr);
2227 if (err) goto out_manu;
2228 if (bmc->id.aux_firmware_revision_set) {
2229 err = device_create_file(&bmc->dev->dev,
2230 &bmc->aux_firmware_rev_attr);
2231 if (err) goto out_prod_id;
2233 if (bmc->guid_set) {
2234 err = device_create_file(&bmc->dev->dev,
2236 if (err) goto out_aux_firm;
2242 if (bmc->id.aux_firmware_revision_set)
2243 device_remove_file(&bmc->dev->dev,
2244 &bmc->aux_firmware_rev_attr);
2246 device_remove_file(&bmc->dev->dev,
2247 &bmc->product_id_attr);
2249 device_remove_file(&bmc->dev->dev,
2250 &bmc->manufacturer_id_attr);
2252 device_remove_file(&bmc->dev->dev,
2253 &bmc->add_dev_support_attr);
2255 device_remove_file(&bmc->dev->dev,
2256 &bmc->version_attr);
2258 device_remove_file(&bmc->dev->dev,
2259 &bmc->firmware_rev_attr);
2261 device_remove_file(&bmc->dev->dev,
2262 &bmc->revision_attr);
2264 device_remove_file(&bmc->dev->dev,
2265 &bmc->provides_dev_sdrs_attr);
2267 device_remove_file(&bmc->dev->dev,
2268 &bmc->device_id_attr);
2273 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2274 const char *sysfs_name)
2277 struct bmc_device *bmc = intf->bmc;
2278 struct bmc_device *old_bmc;
2282 mutex_lock(&ipmidriver_mutex);
2285 * Try to find if there is an bmc_device struct
2286 * representing the interfaced BMC already
2289 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2291 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2296 * If there is already an bmc_device, free the new one,
2297 * otherwise register the new BMC device
2301 intf->bmc = old_bmc;
2304 kref_get(&bmc->refcount);
2305 mutex_unlock(&ipmidriver_mutex);
2308 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2309 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2310 bmc->id.manufacturer_id,
2315 unsigned char orig_dev_id = bmc->id.device_id;
2316 int warn_printed = 0;
2318 snprintf(name, sizeof(name),
2319 "ipmi_bmc.%4.4x", bmc->id.product_id);
2321 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2323 bmc->id.device_id)) {
2324 if (!warn_printed) {
2325 printk(KERN_WARNING PFX
2326 "This machine has two different BMCs"
2327 " with the same product id and device"
2328 " id. This is an error in the"
2329 " firmware, but incrementing the"
2330 " device id to work around the problem."
2331 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2332 bmc->id.product_id, bmc->id.device_id);
2335 bmc->id.device_id++; /* Wraps at 255 */
2336 if (bmc->id.device_id == orig_dev_id) {
2338 "Out of device ids!\n");
2343 bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2345 mutex_unlock(&ipmidriver_mutex);
2348 " Unable to allocate platform device\n");
2351 bmc->dev->dev.driver = &ipmidriver;
2352 dev_set_drvdata(&bmc->dev->dev, bmc);
2353 kref_init(&bmc->refcount);
2355 rv = platform_device_add(bmc->dev);
2356 mutex_unlock(&ipmidriver_mutex);
2358 platform_device_put(bmc->dev);
2362 " Unable to register bmc device: %d\n",
2364 /* Don't go to out_err, you can only do that if
2365 the device is registered already. */
2369 rv = create_files(bmc);
2371 mutex_lock(&ipmidriver_mutex);
2372 platform_device_unregister(bmc->dev);
2373 mutex_unlock(&ipmidriver_mutex);
2379 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2380 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2381 bmc->id.manufacturer_id,
2387 * create symlink from system interface device to bmc device
2390 intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2391 if (!intf->sysfs_name) {
2394 "ipmi_msghandler: allocate link to BMC: %d\n",
2399 rv = sysfs_create_link(&intf->si_dev->kobj,
2400 &bmc->dev->dev.kobj, intf->sysfs_name);
2402 kfree(intf->sysfs_name);
2403 intf->sysfs_name = NULL;
2405 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2410 size = snprintf(dummy, 0, "ipmi%d", ifnum);
2411 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2412 if (!intf->my_dev_name) {
2413 kfree(intf->sysfs_name);
2414 intf->sysfs_name = NULL;
2417 "ipmi_msghandler: allocate link from BMC: %d\n",
2421 snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2423 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2426 kfree(intf->sysfs_name);
2427 intf->sysfs_name = NULL;
2428 kfree(intf->my_dev_name);
2429 intf->my_dev_name = NULL;
2432 " Unable to create symlink to bmc: %d\n",
2440 ipmi_bmc_unregister(intf);
2445 send_guid_cmd(ipmi_smi_t intf, int chan)
2447 struct kernel_ipmi_msg msg;
2448 struct ipmi_system_interface_addr si;
2450 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2451 si.channel = IPMI_BMC_CHANNEL;
2454 msg.netfn = IPMI_NETFN_APP_REQUEST;
2455 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2458 return i_ipmi_request(NULL,
2460 (struct ipmi_addr *) &si,
2467 intf->channels[0].address,
2468 intf->channels[0].lun,
2473 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2475 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2476 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2477 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2481 if (msg->msg.data[0] != 0) {
2482 /* Error from getting the GUID, the BMC doesn't have one. */
2483 intf->bmc->guid_set = 0;
2487 if (msg->msg.data_len < 17) {
2488 intf->bmc->guid_set = 0;
2489 printk(KERN_WARNING PFX
2490 "guid_handler: The GUID response from the BMC was too"
2491 " short, it was %d but should have been 17. Assuming"
2492 " GUID is not available.\n",
2497 memcpy(intf->bmc->guid, msg->msg.data, 16);
2498 intf->bmc->guid_set = 1;
2500 wake_up(&intf->waitq);
2504 get_guid(ipmi_smi_t intf)
2508 intf->bmc->guid_set = 0x2;
2509 intf->null_user_handler = guid_handler;
2510 rv = send_guid_cmd(intf, 0);
2512 /* Send failed, no GUID available. */
2513 intf->bmc->guid_set = 0;
2514 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2515 intf->null_user_handler = NULL;
2519 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2521 struct kernel_ipmi_msg msg;
2522 unsigned char data[1];
2523 struct ipmi_system_interface_addr si;
2525 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2526 si.channel = IPMI_BMC_CHANNEL;
2529 msg.netfn = IPMI_NETFN_APP_REQUEST;
2530 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2534 return i_ipmi_request(NULL,
2536 (struct ipmi_addr *) &si,
2543 intf->channels[0].address,
2544 intf->channels[0].lun,
2549 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2554 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2555 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2556 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2558 /* It's the one we want */
2559 if (msg->msg.data[0] != 0) {
2560 /* Got an error from the channel, just go on. */
2562 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2563 /* If the MC does not support this
2564 command, that is legal. We just
2565 assume it has one IPMB at channel
2567 intf->channels[0].medium
2568 = IPMI_CHANNEL_MEDIUM_IPMB;
2569 intf->channels[0].protocol
2570 = IPMI_CHANNEL_PROTOCOL_IPMB;
2573 intf->curr_channel = IPMI_MAX_CHANNELS;
2574 wake_up(&intf->waitq);
2579 if (msg->msg.data_len < 4) {
2580 /* Message not big enough, just go on. */
2583 chan = intf->curr_channel;
2584 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2585 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2588 intf->curr_channel++;
2589 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2590 wake_up(&intf->waitq);
2592 rv = send_channel_info_cmd(intf, intf->curr_channel);
2595 /* Got an error somehow, just give up. */
2596 intf->curr_channel = IPMI_MAX_CHANNELS;
2597 wake_up(&intf->waitq);
2599 printk(KERN_WARNING PFX
2600 "Error sending channel information: %d\n",
2608 void ipmi_poll_interface(ipmi_user_t user)
2610 ipmi_smi_t intf = user->intf;
2612 if (intf->handlers->poll)
2613 intf->handlers->poll(intf->send_info);
2616 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2618 struct ipmi_device_id *device_id,
2619 struct device *si_dev,
2620 const char *sysfs_name,
2621 unsigned char slave_addr)
2627 struct list_head *link;
2629 /* Make sure the driver is actually initialized, this handles
2630 problems with initialization order. */
2632 rv = ipmi_init_msghandler();
2635 /* The init code doesn't return an error if it was turned
2636 off, but it won't initialize. Check that. */
2641 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2645 intf->ipmi_version_major = ipmi_version_major(device_id);
2646 intf->ipmi_version_minor = ipmi_version_minor(device_id);
2648 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2653 intf->intf_num = -1; /* Mark it invalid for now. */
2654 kref_init(&intf->refcount);
2655 intf->bmc->id = *device_id;
2656 intf->si_dev = si_dev;
2657 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2658 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2659 intf->channels[j].lun = 2;
2661 if (slave_addr != 0)
2662 intf->channels[0].address = slave_addr;
2663 INIT_LIST_HEAD(&intf->users);
2664 intf->handlers = handlers;
2665 intf->send_info = send_info;
2666 spin_lock_init(&intf->seq_lock);
2667 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2668 intf->seq_table[j].inuse = 0;
2669 intf->seq_table[j].seqid = 0;
2672 #ifdef CONFIG_PROC_FS
2673 mutex_init(&intf->proc_entry_lock);
2675 spin_lock_init(&intf->waiting_msgs_lock);
2676 INIT_LIST_HEAD(&intf->waiting_msgs);
2677 spin_lock_init(&intf->events_lock);
2678 INIT_LIST_HEAD(&intf->waiting_events);
2679 intf->waiting_events_count = 0;
2680 mutex_init(&intf->cmd_rcvrs_mutex);
2681 spin_lock_init(&intf->maintenance_mode_lock);
2682 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2683 init_waitqueue_head(&intf->waitq);
2685 spin_lock_init(&intf->counter_lock);
2686 intf->proc_dir = NULL;
2688 mutex_lock(&smi_watchers_mutex);
2689 mutex_lock(&ipmi_interfaces_mutex);
2690 /* Look for a hole in the numbers. */
2692 link = &ipmi_interfaces;
2693 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2694 if (tintf->intf_num != i) {
2695 link = &tintf->link;
2700 /* Add the new interface in numeric order. */
2702 list_add_rcu(&intf->link, &ipmi_interfaces);
2704 list_add_tail_rcu(&intf->link, link);
2706 rv = handlers->start_processing(send_info, intf);
2712 if ((intf->ipmi_version_major > 1)
2713 || ((intf->ipmi_version_major == 1)
2714 && (intf->ipmi_version_minor >= 5)))
2716 /* Start scanning the channels to see what is
2718 intf->null_user_handler = channel_handler;
2719 intf->curr_channel = 0;
2720 rv = send_channel_info_cmd(intf, 0);
2724 /* Wait for the channel info to be read. */
2725 wait_event(intf->waitq,
2726 intf->curr_channel >= IPMI_MAX_CHANNELS);
2727 intf->null_user_handler = NULL;
2729 /* Assume a single IPMB channel at zero. */
2730 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2731 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2735 rv = add_proc_entries(intf, i);
2737 rv = ipmi_bmc_register(intf, i, sysfs_name);
2742 remove_proc_entries(intf);
2743 intf->handlers = NULL;
2744 list_del_rcu(&intf->link);
2745 mutex_unlock(&ipmi_interfaces_mutex);
2746 mutex_unlock(&smi_watchers_mutex);
2748 kref_put(&intf->refcount, intf_free);
2751 * Keep memory order straight for RCU readers. Make
2752 * sure everything else is committed to memory before
2753 * setting intf_num to mark the interface valid.
2757 mutex_unlock(&ipmi_interfaces_mutex);
2758 /* After this point the interface is legal to use. */
2759 call_smi_watchers(i, intf->si_dev);
2760 mutex_unlock(&smi_watchers_mutex);
2766 static void cleanup_smi_msgs(ipmi_smi_t intf)
2769 struct seq_table *ent;
2771 /* No need for locks, the interface is down. */
2772 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2773 ent = &(intf->seq_table[i]);
2776 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
2780 int ipmi_unregister_smi(ipmi_smi_t intf)
2782 struct ipmi_smi_watcher *w;
2783 int intf_num = intf->intf_num;
2785 ipmi_bmc_unregister(intf);
2787 mutex_lock(&smi_watchers_mutex);
2788 mutex_lock(&ipmi_interfaces_mutex);
2789 intf->intf_num = -1;
2790 intf->handlers = NULL;
2791 list_del_rcu(&intf->link);
2792 mutex_unlock(&ipmi_interfaces_mutex);
2795 cleanup_smi_msgs(intf);
2797 remove_proc_entries(intf);
2799 /* Call all the watcher interfaces to tell them that
2800 an interface is gone. */
2801 list_for_each_entry(w, &smi_watchers, link)
2802 w->smi_gone(intf_num);
2803 mutex_unlock(&smi_watchers_mutex);
2805 kref_put(&intf->refcount, intf_free);
2809 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2810 struct ipmi_smi_msg *msg)
2812 struct ipmi_ipmb_addr ipmb_addr;
2813 struct ipmi_recv_msg *recv_msg;
2814 unsigned long flags;
2817 /* This is 11, not 10, because the response must contain a
2818 * completion code. */
2819 if (msg->rsp_size < 11) {
2820 /* Message not big enough, just ignore it. */
2821 spin_lock_irqsave(&intf->counter_lock, flags);
2822 intf->invalid_ipmb_responses++;
2823 spin_unlock_irqrestore(&intf->counter_lock, flags);
2827 if (msg->rsp[2] != 0) {
2828 /* An error getting the response, just ignore it. */
2832 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2833 ipmb_addr.slave_addr = msg->rsp[6];
2834 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2835 ipmb_addr.lun = msg->rsp[7] & 3;
2837 /* It's a response from a remote entity. Look up the sequence
2838 number and handle the response. */
2839 if (intf_find_seq(intf,
2843 (msg->rsp[4] >> 2) & (~1),
2844 (struct ipmi_addr *) &(ipmb_addr),
2847 /* We were unable to find the sequence number,
2848 so just nuke the message. */
2849 spin_lock_irqsave(&intf->counter_lock, flags);
2850 intf->unhandled_ipmb_responses++;
2851 spin_unlock_irqrestore(&intf->counter_lock, flags);
2855 memcpy(recv_msg->msg_data,
2858 /* THe other fields matched, so no need to set them, except
2859 for netfn, which needs to be the response that was
2860 returned, not the request value. */
2861 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2862 recv_msg->msg.data = recv_msg->msg_data;
2863 recv_msg->msg.data_len = msg->rsp_size - 10;
2864 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2865 spin_lock_irqsave(&intf->counter_lock, flags);
2866 intf->handled_ipmb_responses++;
2867 spin_unlock_irqrestore(&intf->counter_lock, flags);
2868 deliver_response(recv_msg);
2873 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2874 struct ipmi_smi_msg *msg)
2876 struct cmd_rcvr *rcvr;
2878 unsigned char netfn;
2881 ipmi_user_t user = NULL;
2882 struct ipmi_ipmb_addr *ipmb_addr;
2883 struct ipmi_recv_msg *recv_msg;
2884 unsigned long flags;
2885 struct ipmi_smi_handlers *handlers;
2887 if (msg->rsp_size < 10) {
2888 /* Message not big enough, just ignore it. */
2889 spin_lock_irqsave(&intf->counter_lock, flags);
2890 intf->invalid_commands++;
2891 spin_unlock_irqrestore(&intf->counter_lock, flags);
2895 if (msg->rsp[2] != 0) {
2896 /* An error getting the response, just ignore it. */
2900 netfn = msg->rsp[4] >> 2;
2902 chan = msg->rsp[3] & 0xf;
2905 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2908 kref_get(&user->refcount);
2914 /* We didn't find a user, deliver an error response. */
2915 spin_lock_irqsave(&intf->counter_lock, flags);
2916 intf->unhandled_commands++;
2917 spin_unlock_irqrestore(&intf->counter_lock, flags);
2919 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2920 msg->data[1] = IPMI_SEND_MSG_CMD;
2921 msg->data[2] = msg->rsp[3];
2922 msg->data[3] = msg->rsp[6];
2923 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2924 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2925 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2927 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2928 msg->data[8] = msg->rsp[8]; /* cmd */
2929 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2930 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2931 msg->data_size = 11;
2936 printk("Invalid command:");
2937 for (m = 0; m < msg->data_size; m++)
2938 printk(" %2.2x", msg->data[m]);
2943 handlers = intf->handlers;
2945 handlers->sender(intf->send_info, msg, 0);
2946 /* We used the message, so return the value
2947 that causes it to not be freed or
2953 /* Deliver the message to the user. */
2954 spin_lock_irqsave(&intf->counter_lock, flags);
2955 intf->handled_commands++;
2956 spin_unlock_irqrestore(&intf->counter_lock, flags);
2958 recv_msg = ipmi_alloc_recv_msg();
2960 /* We couldn't allocate memory for the
2961 message, so requeue it for handling
2964 kref_put(&user->refcount, free_user);
2966 /* Extract the source address from the data. */
2967 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2968 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2969 ipmb_addr->slave_addr = msg->rsp[6];
2970 ipmb_addr->lun = msg->rsp[7] & 3;
2971 ipmb_addr->channel = msg->rsp[3] & 0xf;
2973 /* Extract the rest of the message information
2974 from the IPMB header.*/
2975 recv_msg->user = user;
2976 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2977 recv_msg->msgid = msg->rsp[7] >> 2;
2978 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2979 recv_msg->msg.cmd = msg->rsp[8];
2980 recv_msg->msg.data = recv_msg->msg_data;
2982 /* We chop off 10, not 9 bytes because the checksum
2983 at the end also needs to be removed. */
2984 recv_msg->msg.data_len = msg->rsp_size - 10;
2985 memcpy(recv_msg->msg_data,
2987 msg->rsp_size - 10);
2988 deliver_response(recv_msg);
2995 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2996 struct ipmi_smi_msg *msg)
2998 struct ipmi_lan_addr lan_addr;
2999 struct ipmi_recv_msg *recv_msg;
3000 unsigned long flags;
3003 /* This is 13, not 12, because the response must contain a
3004 * completion code. */
3005 if (msg->rsp_size < 13) {
3006 /* Message not big enough, just ignore it. */
3007 spin_lock_irqsave(&intf->counter_lock, flags);
3008 intf->invalid_lan_responses++;
3009 spin_unlock_irqrestore(&intf->counter_lock, flags);
3013 if (msg->rsp[2] != 0) {
3014 /* An error getting the response, just ignore it. */
3018 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3019 lan_addr.session_handle = msg->rsp[4];
3020 lan_addr.remote_SWID = msg->rsp[8];
3021 lan_addr.local_SWID = msg->rsp[5];
3022 lan_addr.channel = msg->rsp[3] & 0x0f;
3023 lan_addr.privilege = msg->rsp[3] >> 4;
3024 lan_addr.lun = msg->rsp[9] & 3;
3026 /* It's a response from a remote entity. Look up the sequence
3027 number and handle the response. */
3028 if (intf_find_seq(intf,
3032 (msg->rsp[6] >> 2) & (~1),
3033 (struct ipmi_addr *) &(lan_addr),
3036 /* We were unable to find the sequence number,
3037 so just nuke the message. */
3038 spin_lock_irqsave(&intf->counter_lock, flags);
3039 intf->unhandled_lan_responses++;
3040 spin_unlock_irqrestore(&intf->counter_lock, flags);
3044 memcpy(recv_msg->msg_data,
3046 msg->rsp_size - 11);
3047 /* The other fields matched, so no need to set them, except
3048 for netfn, which needs to be the response that was
3049 returned, not the request value. */
3050 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3051 recv_msg->msg.data = recv_msg->msg_data;
3052 recv_msg->msg.data_len = msg->rsp_size - 12;
3053 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3054 spin_lock_irqsave(&intf->counter_lock, flags);
3055 intf->handled_lan_responses++;
3056 spin_unlock_irqrestore(&intf->counter_lock, flags);
3057 deliver_response(recv_msg);
3062 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3063 struct ipmi_smi_msg *msg)
3065 struct cmd_rcvr *rcvr;
3067 unsigned char netfn;
3070 ipmi_user_t user = NULL;
3071 struct ipmi_lan_addr *lan_addr;
3072 struct ipmi_recv_msg *recv_msg;
3073 unsigned long flags;
3075 if (msg->rsp_size < 12) {
3076 /* Message not big enough, just ignore it. */
3077 spin_lock_irqsave(&intf->counter_lock, flags);
3078 intf->invalid_commands++;
3079 spin_unlock_irqrestore(&intf->counter_lock, flags);
3083 if (msg->rsp[2] != 0) {
3084 /* An error getting the response, just ignore it. */
3088 netfn = msg->rsp[6] >> 2;
3090 chan = msg->rsp[3] & 0xf;
3093 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3096 kref_get(&user->refcount);
3102 /* We didn't find a user, just give up. */
3103 spin_lock_irqsave(&intf->counter_lock, flags);
3104 intf->unhandled_commands++;
3105 spin_unlock_irqrestore(&intf->counter_lock, flags);
3107 rv = 0; /* Don't do anything with these messages, just
3108 allow them to be freed. */
3110 /* Deliver the message to the user. */
3111 spin_lock_irqsave(&intf->counter_lock, flags);
3112 intf->handled_commands++;
3113 spin_unlock_irqrestore(&intf->counter_lock, flags);
3115 recv_msg = ipmi_alloc_recv_msg();
3117 /* We couldn't allocate memory for the
3118 message, so requeue it for handling
3121 kref_put(&user->refcount, free_user);
3123 /* Extract the source address from the data. */
3124 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3125 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3126 lan_addr->session_handle = msg->rsp[4];
3127 lan_addr->remote_SWID = msg->rsp[8];
3128 lan_addr->local_SWID = msg->rsp[5];
3129 lan_addr->lun = msg->rsp[9] & 3;
3130 lan_addr->channel = msg->rsp[3] & 0xf;
3131 lan_addr->privilege = msg->rsp[3] >> 4;
3133 /* Extract the rest of the message information
3134 from the IPMB header.*/
3135 recv_msg->user = user;
3136 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3137 recv_msg->msgid = msg->rsp[9] >> 2;
3138 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3139 recv_msg->msg.cmd = msg->rsp[10];
3140 recv_msg->msg.data = recv_msg->msg_data;
3142 /* We chop off 12, not 11 bytes because the checksum
3143 at the end also needs to be removed. */
3144 recv_msg->msg.data_len = msg->rsp_size - 12;
3145 memcpy(recv_msg->msg_data,
3147 msg->rsp_size - 12);
3148 deliver_response(recv_msg);
3155 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3156 struct ipmi_smi_msg *msg)
3158 struct ipmi_system_interface_addr *smi_addr;
3160 recv_msg->msgid = 0;
3161 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3162 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3163 smi_addr->channel = IPMI_BMC_CHANNEL;
3164 smi_addr->lun = msg->rsp[0] & 3;
3165 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3166 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3167 recv_msg->msg.cmd = msg->rsp[1];
3168 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3169 recv_msg->msg.data = recv_msg->msg_data;
3170 recv_msg->msg.data_len = msg->rsp_size - 3;
3173 static int handle_read_event_rsp(ipmi_smi_t intf,
3174 struct ipmi_smi_msg *msg)
3176 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3177 struct list_head msgs;
3180 int deliver_count = 0;
3181 unsigned long flags;
3183 if (msg->rsp_size < 19) {
3184 /* Message is too small to be an IPMB event. */
3185 spin_lock_irqsave(&intf->counter_lock, flags);
3186 intf->invalid_events++;
3187 spin_unlock_irqrestore(&intf->counter_lock, flags);
3191 if (msg->rsp[2] != 0) {
3192 /* An error getting the event, just ignore it. */
3196 INIT_LIST_HEAD(&msgs);
3198 spin_lock_irqsave(&intf->events_lock, flags);
3200 spin_lock(&intf->counter_lock);
3202 spin_unlock(&intf->counter_lock);
3204 /* Allocate and fill in one message for every user that is getting
3207 list_for_each_entry_rcu(user, &intf->users, link) {
3208 if (!user->gets_events)
3211 recv_msg = ipmi_alloc_recv_msg();
3214 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3216 list_del(&recv_msg->link);
3217 ipmi_free_recv_msg(recv_msg);
3219 /* We couldn't allocate memory for the
3220 message, so requeue it for handling
3228 copy_event_into_recv_msg(recv_msg, msg);
3229 recv_msg->user = user;
3230 kref_get(&user->refcount);
3231 list_add_tail(&(recv_msg->link), &msgs);
3235 if (deliver_count) {
3236 /* Now deliver all the messages. */
3237 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3238 list_del(&recv_msg->link);
3239 deliver_response(recv_msg);
3241 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3242 /* No one to receive the message, put it in queue if there's
3243 not already too many things in the queue. */
3244 recv_msg = ipmi_alloc_recv_msg();
3246 /* We couldn't allocate memory for the
3247 message, so requeue it for handling
3253 copy_event_into_recv_msg(recv_msg, msg);
3254 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3255 intf->waiting_events_count++;
3257 /* There's too many things in the queue, discard this
3259 printk(KERN_WARNING PFX "Event queue full, discarding an"
3260 " incoming event\n");
3264 spin_unlock_irqrestore(&(intf->events_lock), flags);
3269 static int handle_bmc_rsp(ipmi_smi_t intf,
3270 struct ipmi_smi_msg *msg)
3272 struct ipmi_recv_msg *recv_msg;
3273 unsigned long flags;
3274 struct ipmi_user *user;
3276 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3277 if (recv_msg == NULL)
3279 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3280 "could be because of a malformed message, or\n"
3281 "because of a hardware error. Contact your\n"
3282 "hardware vender for assistance\n");
3286 user = recv_msg->user;
3287 /* Make sure the user still exists. */
3288 if (user && !user->valid) {
3289 /* The user for the message went away, so give up. */
3290 spin_lock_irqsave(&intf->counter_lock, flags);
3291 intf->unhandled_local_responses++;
3292 spin_unlock_irqrestore(&intf->counter_lock, flags);
3293 ipmi_free_recv_msg(recv_msg);
3295 struct ipmi_system_interface_addr *smi_addr;
3297 spin_lock_irqsave(&intf->counter_lock, flags);
3298 intf->handled_local_responses++;
3299 spin_unlock_irqrestore(&intf->counter_lock, flags);
3300 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3301 recv_msg->msgid = msg->msgid;
3302 smi_addr = ((struct ipmi_system_interface_addr *)
3304 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3305 smi_addr->channel = IPMI_BMC_CHANNEL;
3306 smi_addr->lun = msg->rsp[0] & 3;
3307 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3308 recv_msg->msg.cmd = msg->rsp[1];
3309 memcpy(recv_msg->msg_data,
3312 recv_msg->msg.data = recv_msg->msg_data;
3313 recv_msg->msg.data_len = msg->rsp_size - 2;
3314 deliver_response(recv_msg);
3320 /* Handle a new message. Return 1 if the message should be requeued,
3321 0 if the message should be freed, or -1 if the message should not
3322 be freed or requeued. */
3323 static int handle_new_recv_msg(ipmi_smi_t intf,
3324 struct ipmi_smi_msg *msg)
3332 for (m = 0; m < msg->rsp_size; m++)
3333 printk(" %2.2x", msg->rsp[m]);
3336 if (msg->rsp_size < 2) {
3337 /* Message is too small to be correct. */
3338 printk(KERN_WARNING PFX "BMC returned to small a message"
3339 " for netfn %x cmd %x, got %d bytes\n",
3340 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3342 /* Generate an error response for the message. */
3343 msg->rsp[0] = msg->data[0] | (1 << 2);
3344 msg->rsp[1] = msg->data[1];
3345 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3347 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3348 || (msg->rsp[1] != msg->data[1])) /* Command */
3350 /* The response is not even marginally correct. */
3351 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3352 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3353 (msg->data[0] >> 2) | 1, msg->data[1],
3354 msg->rsp[0] >> 2, msg->rsp[1]);
3356 /* Generate an error response for the message. */
3357 msg->rsp[0] = msg->data[0] | (1 << 2);
3358 msg->rsp[1] = msg->data[1];
3359 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3363 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3364 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3365 && (msg->user_data != NULL))
3367 /* It's a response to a response we sent. For this we
3368 deliver a send message response to the user. */
3369 struct ipmi_recv_msg *recv_msg = msg->user_data;
3372 if (msg->rsp_size < 2)
3373 /* Message is too small to be correct. */
3376 chan = msg->data[2] & 0x0f;
3377 if (chan >= IPMI_MAX_CHANNELS)
3378 /* Invalid channel number */
3384 /* Make sure the user still exists. */
3385 if (!recv_msg->user || !recv_msg->user->valid)
3388 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3389 recv_msg->msg.data = recv_msg->msg_data;
3390 recv_msg->msg.data_len = 1;
3391 recv_msg->msg_data[0] = msg->rsp[2];
3392 deliver_response(recv_msg);
3393 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3394 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3396 /* It's from the receive queue. */
3397 chan = msg->rsp[3] & 0xf;
3398 if (chan >= IPMI_MAX_CHANNELS) {
3399 /* Invalid channel number */
3404 switch (intf->channels[chan].medium) {
3405 case IPMI_CHANNEL_MEDIUM_IPMB:
3406 if (msg->rsp[4] & 0x04) {
3407 /* It's a response, so find the
3408 requesting message and send it up. */
3409 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3411 /* It's a command to the SMS from some other
3412 entity. Handle that. */
3413 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3417 case IPMI_CHANNEL_MEDIUM_8023LAN:
3418 case IPMI_CHANNEL_MEDIUM_ASYNC:
3419 if (msg->rsp[6] & 0x04) {
3420 /* It's a response, so find the
3421 requesting message and send it up. */
3422 requeue = handle_lan_get_msg_rsp(intf, msg);
3424 /* It's a command to the SMS from some other
3425 entity. Handle that. */
3426 requeue = handle_lan_get_msg_cmd(intf, msg);
3431 /* We don't handle the channel type, so just
3432 * free the message. */
3436 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3437 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3439 /* It's an asyncronous event. */
3440 requeue = handle_read_event_rsp(intf, msg);
3442 /* It's a response from the local BMC. */
3443 requeue = handle_bmc_rsp(intf, msg);
3450 /* Handle a new message from the lower layer. */
3451 void ipmi_smi_msg_received(ipmi_smi_t intf,
3452 struct ipmi_smi_msg *msg)
3454 unsigned long flags;
3458 if ((msg->data_size >= 2)
3459 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3460 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3461 && (msg->user_data == NULL))
3463 /* This is the local response to a command send, start
3464 the timer for these. The user_data will not be
3465 NULL if this is a response send, and we will let
3466 response sends just go through. */
3468 /* Check for errors, if we get certain errors (ones
3469 that mean basically we can try again later), we
3470 ignore them and start the timer. Otherwise we
3471 report the error immediately. */
3472 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3473 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3474 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3475 && (msg->rsp[2] != IPMI_BUS_ERR)
3476 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3478 int chan = msg->rsp[3] & 0xf;
3480 /* Got an error sending the message, handle it. */
3481 spin_lock_irqsave(&intf->counter_lock, flags);
3482 if (chan >= IPMI_MAX_CHANNELS)
3483 ; /* This shouldn't happen */
3484 else if ((intf->channels[chan].medium
3485 == IPMI_CHANNEL_MEDIUM_8023LAN)
3486 || (intf->channels[chan].medium
3487 == IPMI_CHANNEL_MEDIUM_ASYNC))
3488 intf->sent_lan_command_errs++;
3490 intf->sent_ipmb_command_errs++;
3491 spin_unlock_irqrestore(&intf->counter_lock, flags);
3492 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3494 /* The message was sent, start the timer. */
3495 intf_start_seq_timer(intf, msg->msgid);
3498 ipmi_free_smi_msg(msg);
3502 /* To preserve message order, if the list is not empty, we
3503 tack this message onto the end of the list. */
3504 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3505 if (!list_empty(&intf->waiting_msgs)) {
3506 list_add_tail(&msg->link, &intf->waiting_msgs);
3507 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3510 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3512 rv = handle_new_recv_msg(intf, msg);
3514 /* Could not handle the message now, just add it to a
3515 list to handle later. */
3516 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3517 list_add_tail(&msg->link, &intf->waiting_msgs);
3518 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3519 } else if (rv == 0) {
3520 ipmi_free_smi_msg(msg);
3527 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3532 list_for_each_entry_rcu(user, &intf->users, link) {
3533 if (!user->handler->ipmi_watchdog_pretimeout)
3536 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3542 static struct ipmi_smi_msg *
3543 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3544 unsigned char seq, long seqid)
3546 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3548 /* If we can't allocate the message, then just return, we
3549 get 4 retries, so this should be ok. */
3552 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3553 smi_msg->data_size = recv_msg->msg.data_len;
3554 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3560 for (m = 0; m < smi_msg->data_size; m++)
3561 printk(" %2.2x", smi_msg->data[m]);
3568 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3569 struct list_head *timeouts, long timeout_period,
3570 int slot, unsigned long *flags)
3572 struct ipmi_recv_msg *msg;
3573 struct ipmi_smi_handlers *handlers;
3575 if (intf->intf_num == -1)
3581 ent->timeout -= timeout_period;
3582 if (ent->timeout > 0)
3585 if (ent->retries_left == 0) {
3586 /* The message has used all its retries. */
3588 msg = ent->recv_msg;
3589 list_add_tail(&msg->link, timeouts);
3590 spin_lock(&intf->counter_lock);
3592 intf->timed_out_ipmb_broadcasts++;
3593 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3594 intf->timed_out_lan_commands++;
3596 intf->timed_out_ipmb_commands++;
3597 spin_unlock(&intf->counter_lock);
3599 struct ipmi_smi_msg *smi_msg;
3600 /* More retries, send again. */
3602 /* Start with the max timer, set to normal
3603 timer after the message is sent. */
3604 ent->timeout = MAX_MSG_TIMEOUT;
3605 ent->retries_left--;
3606 spin_lock(&intf->counter_lock);
3607 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3608 intf->retransmitted_lan_commands++;
3610 intf->retransmitted_ipmb_commands++;
3611 spin_unlock(&intf->counter_lock);
3613 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3618 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3620 /* Send the new message. We send with a zero
3621 * priority. It timed out, I doubt time is
3622 * that critical now, and high priority
3623 * messages are really only for messages to the
3624 * local MC, which don't get resent. */
3625 handlers = intf->handlers;
3627 intf->handlers->sender(intf->send_info,
3630 ipmi_free_smi_msg(smi_msg);
3632 spin_lock_irqsave(&intf->seq_lock, *flags);
3636 static void ipmi_timeout_handler(long timeout_period)
3639 struct list_head timeouts;
3640 struct ipmi_recv_msg *msg, *msg2;
3641 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3642 unsigned long flags;
3646 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3647 /* See if any waiting messages need to be processed. */
3648 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3649 list_for_each_entry_safe(smi_msg, smi_msg2,
3650 &intf->waiting_msgs, link) {
3651 if (!handle_new_recv_msg(intf, smi_msg)) {
3652 list_del(&smi_msg->link);
3653 ipmi_free_smi_msg(smi_msg);
3655 /* To preserve message order, quit if we
3656 can't handle a message. */
3660 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3662 /* Go through the seq table and find any messages that
3663 have timed out, putting them in the timeouts
3665 INIT_LIST_HEAD(&timeouts);
3666 spin_lock_irqsave(&intf->seq_lock, flags);
3667 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3668 check_msg_timeout(intf, &(intf->seq_table[i]),
3669 &timeouts, timeout_period, i,
3671 spin_unlock_irqrestore(&intf->seq_lock, flags);
3673 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3674 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3677 * Maintenance mode handling. Check the timeout
3678 * optimistically before we claim the lock. It may
3679 * mean a timeout gets missed occasionally, but that
3680 * only means the timeout gets extended by one period
3681 * in that case. No big deal, and it avoids the lock
3684 if (intf->auto_maintenance_timeout > 0) {
3685 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3686 if (intf->auto_maintenance_timeout > 0) {
3687 intf->auto_maintenance_timeout
3689 if (!intf->maintenance_mode
3690 && (intf->auto_maintenance_timeout <= 0))
3692 intf->maintenance_mode_enable = 0;
3693 maintenance_mode_update(intf);
3696 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
3703 static void ipmi_request_event(void)
3706 struct ipmi_smi_handlers *handlers;
3709 /* Called from the timer, no need to check if handlers is
3711 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3712 /* No event requests when in maintenance mode. */
3713 if (intf->maintenance_mode_enable)
3716 handlers = intf->handlers;
3718 handlers->request_events(intf->send_info);
3723 static struct timer_list ipmi_timer;
3725 /* Call every ~100 ms. */
3726 #define IPMI_TIMEOUT_TIME 100
3728 /* How many jiffies does it take to get to the timeout time. */
3729 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3731 /* Request events from the queue every second (this is the number of
3732 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3733 future, IPMI will add a way to know immediately if an event is in
3734 the queue and this silliness can go away. */
3735 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3737 static atomic_t stop_operation;
3738 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3740 static void ipmi_timeout(unsigned long data)
3742 if (atomic_read(&stop_operation))
3746 if (ticks_to_req_ev == 0) {
3747 ipmi_request_event();
3748 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3751 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3753 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3757 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3758 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3760 /* FIXME - convert these to slabs. */
3761 static void free_smi_msg(struct ipmi_smi_msg *msg)
3763 atomic_dec(&smi_msg_inuse_count);
3767 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3769 struct ipmi_smi_msg *rv;
3770 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3772 rv->done = free_smi_msg;
3773 rv->user_data = NULL;
3774 atomic_inc(&smi_msg_inuse_count);
3779 static void free_recv_msg(struct ipmi_recv_msg *msg)
3781 atomic_dec(&recv_msg_inuse_count);
3785 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3787 struct ipmi_recv_msg *rv;
3789 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3792 rv->done = free_recv_msg;
3793 atomic_inc(&recv_msg_inuse_count);
3798 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3801 kref_put(&msg->user->refcount, free_user);
3805 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op completion callback for the on-stack SMI message used in the
   panic path (see send_panic_events); nothing to free. */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
}
/* No-op completion callback for the on-stack receive message used in
   the panic path; nothing to free. */
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
}
3815 #ifdef CONFIG_IPMI_PANIC_STRING
3816 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3818 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3819 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3820 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3821 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3823 /* A get event receiver command, save it. */
3824 intf->event_receiver = msg->msg.data[1];
3825 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3829 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3831 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3832 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3833 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3834 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3836 /* A get device id command, save if we are an event
3837 receiver or generator. */
3838 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3839 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3844 static void send_panic_events(char *str)
3846 struct kernel_ipmi_msg msg;
3848 unsigned char data[16];
3849 struct ipmi_system_interface_addr *si;
3850 struct ipmi_addr addr;
3851 struct ipmi_smi_msg smi_msg;
3852 struct ipmi_recv_msg recv_msg;
3854 si = (struct ipmi_system_interface_addr *) &addr;
3855 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3856 si->channel = IPMI_BMC_CHANNEL;
3859 /* Fill in an event telling that we have failed. */
3860 msg.netfn = 0x04; /* Sensor or Event. */
3861 msg.cmd = 2; /* Platform event command. */
3864 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3865 data[1] = 0x03; /* This is for IPMI 1.0. */
3866 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3867 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3868 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3870 /* Put a few breadcrumbs in. Hopefully later we can add more things
3871 to make the panic events more useful. */
3878 smi_msg.done = dummy_smi_done_handler;
3879 recv_msg.done = dummy_recv_done_handler;
3881 /* For every registered interface, send the event. */
3882 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3883 if (!intf->handlers)
3884 /* Interface is not ready. */
3887 /* Send the event announcing the panic. */
3888 intf->handlers->set_run_to_completion(intf->send_info, 1);
3889 i_ipmi_request(NULL,
3898 intf->channels[0].address,
3899 intf->channels[0].lun,
3900 0, 1); /* Don't retry, and don't wait. */
3903 #ifdef CONFIG_IPMI_PANIC_STRING
3904 /* On every interface, dump a bunch of OEM event holding the
3909 /* For every registered interface, send the event. */
3910 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3912 struct ipmi_ipmb_addr *ipmb;
3915 if (intf->intf_num == -1)
3916 /* Interface was not ready yet. */
3920 * intf_num is used as an marker to tell if the
3921 * interface is valid. Thus we need a read barrier to
3922 * make sure data fetched before checking intf_num
3927 /* First job here is to figure out where to send the
3928 OEM events. There's no way in IPMI to send OEM
3929 events using an event send command, so we have to
3930 find the SEL to put them in and stick them in
3933 /* Get capabilities from the get device id. */
3934 intf->local_sel_device = 0;
3935 intf->local_event_generator = 0;
3936 intf->event_receiver = 0;
3938 /* Request the device info from the local MC. */
3939 msg.netfn = IPMI_NETFN_APP_REQUEST;
3940 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3943 intf->null_user_handler = device_id_fetcher;
3944 i_ipmi_request(NULL,
3953 intf->channels[0].address,
3954 intf->channels[0].lun,
3955 0, 1); /* Don't retry, and don't wait. */
3957 if (intf->local_event_generator) {
3958 /* Request the event receiver from the local MC. */
3959 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3960 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3963 intf->null_user_handler = event_receiver_fetcher;
3964 i_ipmi_request(NULL,
3973 intf->channels[0].address,
3974 intf->channels[0].lun,
3975 0, 1); /* no retry, and no wait. */
3977 intf->null_user_handler = NULL;
3979 /* Validate the event receiver. The low bit must not
3980 be 1 (it must be a valid IPMB address), it cannot
3981 be zero, and it must not be my address. */
3982 if (((intf->event_receiver & 1) == 0)
3983 && (intf->event_receiver != 0)
3984 && (intf->event_receiver != intf->channels[0].address))
3986 /* The event receiver is valid, send an IPMB
3988 ipmb = (struct ipmi_ipmb_addr *) &addr;
3989 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3990 ipmb->channel = 0; /* FIXME - is this right? */
3991 ipmb->lun = intf->event_receiver_lun;
3992 ipmb->slave_addr = intf->event_receiver;
3993 } else if (intf->local_sel_device) {
3994 /* The event receiver was not valid (or was
3995 me), but I am an SEL device, just dump it
3997 si = (struct ipmi_system_interface_addr *) &addr;
3998 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3999 si->channel = IPMI_BMC_CHANNEL;
4002 continue; /* No where to send the event. */
4005 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4006 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4012 int size = strlen(p);
4018 data[2] = 0xf0; /* OEM event without timestamp. */
4019 data[3] = intf->channels[0].address;
4020 data[4] = j++; /* sequence # */
4021 /* Always give 11 bytes, so strncpy will fill
4022 it with zeroes for me. */
4023 strncpy(data+5, p, 11);
4026 i_ipmi_request(NULL,
4035 intf->channels[0].address,
4036 intf->channels[0].lun,
4037 0, 1); /* no retry, and no wait. */
4040 #endif /* CONFIG_IPMI_PANIC_STRING */
4042 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Set once the panic notifier has run, so a nested/repeated panic does
 * not re-drive the interfaces.  NOTE(review): the line that sets this
 * flag is not in this extract -- presumably at the top of panic_event();
 * confirm against the full source. */
4044 static int has_panicked;
/*
 * Panic-notifier callback.  Walks every registered interface and tells
 * its low-level (SMI) driver to switch to run-to-completion mode, i.e.
 * polled operation that works with interrupts disabled during a panic.
 * With CONFIG_IPMI_PANIC_EVENT it then emits the panic events/OEM
 * records via send_panic_events().
 *
 * NOTE(review): this extract is missing interior lines -- the third
 * parameter, the early-return guard (presumably on has_panicked), and
 * the NOTIFY_* return statement.  Confirm against the full source.
 */
4046 static int panic_event(struct notifier_block *this,
4047 unsigned long event,
4056 /* For every registered interface, set it to run to completion. */
4057 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
/* A NULL handlers pointer means registration has not finished. */
4058 if (!intf->handlers)
4059 /* Interface is not ready. */
4062 intf->handlers->set_run_to_completion(intf->send_info, 1);
4065 #ifdef CONFIG_IPMI_PANIC_EVENT
4066 send_panic_events(ptr);
/* Hook registered on panic_notifier_list by ipmi_init_msghandler().
 * Priority 200 runs it relatively early among panic handlers, before
 * lower-priority consumers of the chain. */
4072 static struct notifier_block panic_block = {
4073 .notifier_call = panic_event,
4075 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time core initialization of the message handler:
 *  - register the IPMI driver model entry,
 *  - create the /proc/ipmi root directory (CONFIG_PROC_FS only),
 *  - arm the periodic message/timeout timer,
 *  - register the panic notifier.
 *
 * The file-scope `initialized` flag and the forward declaration in the
 * header suggest this is also invoked lazily (not just from module
 * init); the guard lines are missing from this extract -- confirm.
 * Returns 0 on success or a negative errno (return statements are also
 * outside this extract).
 */
4078 static int ipmi_init_msghandler(void)
4085 rv = driver_register(&ipmidriver);
4087 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4091 printk(KERN_INFO "ipmi message handler version "
4092 IPMI_DRIVER_VERSION "\n");
4094 #ifdef CONFIG_PROC_FS
4095 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4096 if (!proc_ipmi_root) {
4097 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4101 proc_ipmi_root->owner = THIS_MODULE;
4102 #endif /* CONFIG_PROC_FS */
/* Kick off the periodic timer; it rearms itself every
 * IPMI_TIMEOUT_JIFFIES until stop_operation is raised at cleanup. */
4104 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4105 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4107 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: just delegates to the shared core initializer.
 * NOTE(review): the return of ipmi_init_msghandler()'s result is
 * missing from this extract. */
4114 static __init int ipmi_init_msghandler_mod(void)
4116 ipmi_init_msghandler();
/*
 * Module exit: tear down everything ipmi_init_msghandler() set up, in
 * reverse order -- panic notifier, periodic timer, /proc entry, driver
 * registration -- then warn about any leaked message buffers.
 * (The `count` declaration and the guard/argument lines of the leak
 * printks fall outside this extract.)
 */
4120 static __exit void cleanup_ipmi(void)
4127 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4129 /* This can't be called if any interfaces exist, so no worry about
4130 shutting down the interfaces. */
4132 /* Tell the timer to stop, then wait for it to stop. This avoids
4133 problems with race conditions removing the timer here. */
4134 atomic_inc(&stop_operation);
4135 del_timer_sync(&ipmi_timer);
4137 #ifdef CONFIG_PROC_FS
4138 remove_proc_entry(proc_ipmi_root->name, NULL);
4139 #endif /* CONFIG_PROC_FS */
4141 driver_unregister(&ipmidriver);
/* Nonzero in-use counts here mean some SMI or receive message was
 * allocated but never freed -- report, since the module is going away. */
4145 /* Check for buffer leaks. */
4146 count = atomic_read(&smi_msg_inuse_count);
4148 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4150 count = atomic_read(&recv_msg_inuse_count);
4152 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module entry/exit wiring and modinfo metadata. */
4155 module_exit(cleanup_ipmi);
4157 module_init(ipmi_init_msghandler_mod);
4158 MODULE_LICENSE("GPL");
4159 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4160 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4161 MODULE_VERSION(IPMI_DRIVER_VERSION);
/* Public kernel API of the message handler, exported for the other
 * IPMI modules (SMI drivers, device interface, watchdog -- presumably;
 * the consumers are outside this file). */
4163 EXPORT_SYMBOL(ipmi_create_user);
4164 EXPORT_SYMBOL(ipmi_destroy_user);
4165 EXPORT_SYMBOL(ipmi_get_version);
4166 EXPORT_SYMBOL(ipmi_request_settime);
4167 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4168 EXPORT_SYMBOL(ipmi_poll_interface);
4169 EXPORT_SYMBOL(ipmi_register_smi);
4170 EXPORT_SYMBOL(ipmi_unregister_smi);
4171 EXPORT_SYMBOL(ipmi_register_for_cmd);
4172 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4173 EXPORT_SYMBOL(ipmi_smi_msg_received);
4174 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4175 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4176 EXPORT_SYMBOL(ipmi_addr_length);
4177 EXPORT_SYMBOL(ipmi_validate_addr);
4178 EXPORT_SYMBOL(ipmi_set_gets_events);
4179 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4180 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4181 EXPORT_SYMBOL(ipmi_set_my_address);
4182 EXPORT_SYMBOL(ipmi_get_my_address);
4183 EXPORT_SYMBOL(ipmi_set_my_LUN);
4184 EXPORT_SYMBOL(ipmi_get_my_LUN);
4185 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4186 EXPORT_SYMBOL(ipmi_free_recv_msg);