4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
48 #define PFX "IPMI message handler: "
50 #define IPMI_DRIVER_VERSION "39.1"
52 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53 static int ipmi_init_msghandler(void);
55 static int initialized;
58 static struct proc_dir_entry *proc_ipmi_root;
59 #endif /* CONFIG_PROC_FS */
61 /* Remain in auto-maintenance mode for this amount of time (in ms). */
62 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
64 #define MAX_EVENTS_IN_QUEUE 25
66 /* Don't let a message sit in a queue forever, always time it with at lest
67 the max message timer. This is in milliseconds. */
68 #define MAX_MSG_TIMEOUT 60000
72 * The main "user" data structure.
76 struct list_head link;
78 /* Set to "0" when the user is destroyed. */
83 /* The upper layer that handles receive messages. */
84 struct ipmi_user_hndl *handler;
87 /* The interface this user is bound to. */
90 /* Does this interface receive IPMI events? */
96 struct list_head link;
104 * This is used to form a linked lised during mass deletion.
105 * Since this is in an RCU list, we cannot use the link above
106 * or change any data until the RCU period completes. So we
107 * use this next variable during mass deletion so we can have
108 * a list and don't have to wait and restart the search on
109 * every individual deletion of a command. */
110 struct cmd_rcvr *next;
115 unsigned int inuse : 1;
116 unsigned int broadcast : 1;
118 unsigned long timeout;
119 unsigned long orig_timeout;
120 unsigned int retries_left;
122 /* To verify on an incoming send message response that this is
123 the message that the response is for, we keep a sequence id
124 and increment it every time we send a message. */
127 /* This is held so we can properly respond to the message on a
128 timeout, and it is used to hold the temporary data for
129 retransmission, too. */
130 struct ipmi_recv_msg *recv_msg;
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The layout is:
   bits 31..26 = sequence-table index (6 bits, IPMI_IPMB_NUM_SEQ == 64)
   bits 25..0  = per-slot sequence id (26 bits).
   Note: the store and fetch masks must agree, otherwise a stored
   msgid does not round-trip and responses can never be matched to
   their requests.  (The previous code stored seq with 0xff and a
   26-bit seqid but fetched seq with 0x3f and only 22 bits of seqid.) */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)		\
	do {						\
		seq = (((msgid) >> 26) & 0x3f);		\
		seqid = ((msgid) & 0x3ffffff);		\
	} while (0)

/* Advance a slot's sequence id, wrapping within the 26-bit field. */
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
147 unsigned char medium;
148 unsigned char protocol;
150 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
151 but may be changed by the user. */
152 unsigned char address;
154 /* My LUN. This should generally stay the SMS LUN, but just in
159 #ifdef CONFIG_PROC_FS
160 struct ipmi_proc_entry
163 struct ipmi_proc_entry *next;
169 struct platform_device *dev;
170 struct ipmi_device_id id;
171 unsigned char guid[16];
174 struct kref refcount;
176 /* bmc device attributes */
177 struct device_attribute device_id_attr;
178 struct device_attribute provides_dev_sdrs_attr;
179 struct device_attribute revision_attr;
180 struct device_attribute firmware_rev_attr;
181 struct device_attribute version_attr;
182 struct device_attribute add_dev_support_attr;
183 struct device_attribute manufacturer_id_attr;
184 struct device_attribute product_id_attr;
185 struct device_attribute guid_attr;
186 struct device_attribute aux_firmware_rev_attr;
189 #define IPMI_IPMB_NUM_SEQ 64
190 #define IPMI_MAX_CHANNELS 16
193 /* What interface number are we? */
196 struct kref refcount;
198 /* Used for a list of interfaces. */
199 struct list_head link;
201 /* The list of upper layers that are using me. seq_lock
203 struct list_head users;
205 /* Information to supply to users. */
206 unsigned char ipmi_version_major;
207 unsigned char ipmi_version_minor;
209 /* Used for wake ups at startup. */
210 wait_queue_head_t waitq;
212 struct bmc_device *bmc;
216 /* This is the lower-layer's sender routine. Note that you
217 * must either be holding the ipmi_interfaces_mutex or be in
218 * an umpreemptible region to use this. You must fetch the
219 * value into a local variable and make sure it is not NULL. */
220 struct ipmi_smi_handlers *handlers;
223 #ifdef CONFIG_PROC_FS
224 /* A list of proc entries for this interface. */
225 struct mutex proc_entry_lock;
226 struct ipmi_proc_entry *proc_entries;
229 /* Driver-model device for the system interface. */
230 struct device *si_dev;
232 /* A table of sequence numbers for this interface. We use the
233 sequence numbers for IPMB messages that go out of the
234 interface to match them up with their responses. A routine
235 is called periodically to time the items in this list. */
237 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
240 /* Messages that were delayed for some reason (out of memory,
241 for instance), will go in here to be processed later in a
242 periodic timer interrupt. */
243 spinlock_t waiting_msgs_lock;
244 struct list_head waiting_msgs;
246 /* The list of command receivers that are registered for commands
247 on this interface. */
248 struct mutex cmd_rcvrs_mutex;
249 struct list_head cmd_rcvrs;
251 /* Events that were queues because no one was there to receive
253 spinlock_t events_lock; /* For dealing with event stuff. */
254 struct list_head waiting_events;
255 unsigned int waiting_events_count; /* How many events in queue? */
256 int delivering_events;
258 /* The event receiver for my BMC, only really used at panic
259 shutdown as a place to store this. */
260 unsigned char event_receiver;
261 unsigned char event_receiver_lun;
262 unsigned char local_sel_device;
263 unsigned char local_event_generator;
265 /* For handling of maintenance mode. */
266 int maintenance_mode;
267 int maintenance_mode_enable;
268 int auto_maintenance_timeout;
269 spinlock_t maintenance_mode_lock; /* Used in a timer... */
271 /* A cheap hack, if this is non-null and a message to an
272 interface comes in with a NULL user, call this routine with
273 it. Note that the message will still be freed by the
274 caller. This only works on the system interface. */
275 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
277 /* When we are scanning the channels for an SMI, this will
278 tell which channel we are scanning. */
281 /* Channel information */
282 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
285 struct proc_dir_entry *proc_dir;
286 char proc_dir_name[10];
288 spinlock_t counter_lock; /* For making counters atomic. */
290 /* Commands we got that were invalid. */
291 unsigned int sent_invalid_commands;
293 /* Commands we sent to the MC. */
294 unsigned int sent_local_commands;
295 /* Responses from the MC that were delivered to a user. */
296 unsigned int handled_local_responses;
297 /* Responses from the MC that were not delivered to a user. */
298 unsigned int unhandled_local_responses;
300 /* Commands we sent out to the IPMB bus. */
301 unsigned int sent_ipmb_commands;
302 /* Commands sent on the IPMB that had errors on the SEND CMD */
303 unsigned int sent_ipmb_command_errs;
304 /* Each retransmit increments this count. */
305 unsigned int retransmitted_ipmb_commands;
306 /* When a message times out (runs out of retransmits) this is
308 unsigned int timed_out_ipmb_commands;
310 /* This is like above, but for broadcasts. Broadcasts are
311 *not* included in the above count (they are expected to
313 unsigned int timed_out_ipmb_broadcasts;
315 /* Responses I have sent to the IPMB bus. */
316 unsigned int sent_ipmb_responses;
318 /* The response was delivered to the user. */
319 unsigned int handled_ipmb_responses;
320 /* The response had invalid data in it. */
321 unsigned int invalid_ipmb_responses;
322 /* The response didn't have anyone waiting for it. */
323 unsigned int unhandled_ipmb_responses;
325 /* Commands we sent out to the IPMB bus. */
326 unsigned int sent_lan_commands;
327 /* Commands sent on the IPMB that had errors on the SEND CMD */
328 unsigned int sent_lan_command_errs;
329 /* Each retransmit increments this count. */
330 unsigned int retransmitted_lan_commands;
331 /* When a message times out (runs out of retransmits) this is
333 unsigned int timed_out_lan_commands;
335 /* Responses I have sent to the IPMB bus. */
336 unsigned int sent_lan_responses;
338 /* The response was delivered to the user. */
339 unsigned int handled_lan_responses;
340 /* The response had invalid data in it. */
341 unsigned int invalid_lan_responses;
342 /* The response didn't have anyone waiting for it. */
343 unsigned int unhandled_lan_responses;
345 /* The command was delivered to the user. */
346 unsigned int handled_commands;
347 /* The command had invalid data in it. */
348 unsigned int invalid_commands;
349 /* The command didn't have anyone waiting for it. */
350 unsigned int unhandled_commands;
352 /* Invalid data in an event. */
353 unsigned int invalid_events;
355 /* Events that were received with the proper format. */
359 * run_to_completion duplicate of smb_info, smi_info
360 * and ipmi_serial_info structures. Used to decrease numbers of
361 * parameters passed by "low" level IPMI code.
363 int run_to_completion;
365 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
368 * The driver model view of the IPMI messaging driver.
370 static struct device_driver ipmidriver = {
372 .bus = &platform_bus_type
374 static DEFINE_MUTEX(ipmidriver_mutex);
376 static LIST_HEAD(ipmi_interfaces);
377 static DEFINE_MUTEX(ipmi_interfaces_mutex);
379 /* List of watchers that want to know when smi's are added and
381 static LIST_HEAD(smi_watchers);
382 static DEFINE_MUTEX(smi_watchers_mutex);
385 static void free_recv_msg_list(struct list_head *q)
387 struct ipmi_recv_msg *msg, *msg2;
389 list_for_each_entry_safe(msg, msg2, q, link) {
390 list_del(&msg->link);
391 ipmi_free_recv_msg(msg);
395 static void free_smi_msg_list(struct list_head *q)
397 struct ipmi_smi_msg *msg, *msg2;
399 list_for_each_entry_safe(msg, msg2, q, link) {
400 list_del(&msg->link);
401 ipmi_free_smi_msg(msg);
405 static void clean_up_interface_data(ipmi_smi_t intf)
408 struct cmd_rcvr *rcvr, *rcvr2;
409 struct list_head list;
411 free_smi_msg_list(&intf->waiting_msgs);
412 free_recv_msg_list(&intf->waiting_events);
415 * Wholesale remove all the entries from the list in the
416 * interface and wait for RCU to know that none are in use.
418 mutex_lock(&intf->cmd_rcvrs_mutex);
419 INIT_LIST_HEAD(&list);
420 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
421 mutex_unlock(&intf->cmd_rcvrs_mutex);
423 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
426 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
427 if ((intf->seq_table[i].inuse)
428 && (intf->seq_table[i].recv_msg))
430 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
435 static void intf_free(struct kref *ref)
437 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
439 clean_up_interface_data(intf);
443 struct watcher_entry {
446 struct list_head link;
449 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
452 LIST_HEAD(to_deliver);
453 struct watcher_entry *e, *e2;
455 mutex_lock(&smi_watchers_mutex);
457 mutex_lock(&ipmi_interfaces_mutex);
459 /* Build a list of things to deliver. */
460 list_for_each_entry(intf, &ipmi_interfaces, link) {
461 if (intf->intf_num == -1)
463 e = kmalloc(sizeof(*e), GFP_KERNEL);
466 kref_get(&intf->refcount);
468 e->intf_num = intf->intf_num;
469 list_add_tail(&e->link, &to_deliver);
472 /* We will succeed, so add it to the list. */
473 list_add(&watcher->link, &smi_watchers);
475 mutex_unlock(&ipmi_interfaces_mutex);
477 list_for_each_entry_safe(e, e2, &to_deliver, link) {
479 watcher->new_smi(e->intf_num, e->intf->si_dev);
480 kref_put(&e->intf->refcount, intf_free);
484 mutex_unlock(&smi_watchers_mutex);
489 mutex_unlock(&ipmi_interfaces_mutex);
490 mutex_unlock(&smi_watchers_mutex);
491 list_for_each_entry_safe(e, e2, &to_deliver, link) {
493 kref_put(&e->intf->refcount, intf_free);
499 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
501 mutex_lock(&smi_watchers_mutex);
502 list_del(&(watcher->link));
503 mutex_unlock(&smi_watchers_mutex);
508 * Must be called with smi_watchers_mutex held.
511 call_smi_watchers(int i, struct device *dev)
513 struct ipmi_smi_watcher *w;
515 list_for_each_entry(w, &smi_watchers, link) {
516 if (try_module_get(w->owner)) {
518 module_put(w->owner);
524 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
526 if (addr1->addr_type != addr2->addr_type)
529 if (addr1->channel != addr2->channel)
532 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
533 struct ipmi_system_interface_addr *smi_addr1
534 = (struct ipmi_system_interface_addr *) addr1;
535 struct ipmi_system_interface_addr *smi_addr2
536 = (struct ipmi_system_interface_addr *) addr2;
537 return (smi_addr1->lun == smi_addr2->lun);
540 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
541 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
543 struct ipmi_ipmb_addr *ipmb_addr1
544 = (struct ipmi_ipmb_addr *) addr1;
545 struct ipmi_ipmb_addr *ipmb_addr2
546 = (struct ipmi_ipmb_addr *) addr2;
548 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
549 && (ipmb_addr1->lun == ipmb_addr2->lun));
552 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
553 struct ipmi_lan_addr *lan_addr1
554 = (struct ipmi_lan_addr *) addr1;
555 struct ipmi_lan_addr *lan_addr2
556 = (struct ipmi_lan_addr *) addr2;
558 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
559 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
560 && (lan_addr1->session_handle
561 == lan_addr2->session_handle)
562 && (lan_addr1->lun == lan_addr2->lun));
568 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
570 if (len < sizeof(struct ipmi_system_interface_addr)) {
574 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
575 if (addr->channel != IPMI_BMC_CHANNEL)
580 if ((addr->channel == IPMI_BMC_CHANNEL)
581 || (addr->channel >= IPMI_MAX_CHANNELS)
582 || (addr->channel < 0))
585 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
586 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
588 if (len < sizeof(struct ipmi_ipmb_addr)) {
594 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
595 if (len < sizeof(struct ipmi_lan_addr)) {
604 unsigned int ipmi_addr_length(int addr_type)
606 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
607 return sizeof(struct ipmi_system_interface_addr);
609 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
610 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
612 return sizeof(struct ipmi_ipmb_addr);
615 if (addr_type == IPMI_LAN_ADDR_TYPE)
616 return sizeof(struct ipmi_lan_addr);
621 static void deliver_response(struct ipmi_recv_msg *msg)
624 ipmi_smi_t intf = msg->user_msg_data;
627 /* Special handling for NULL users. */
628 if (intf->null_user_handler) {
629 intf->null_user_handler(intf, msg);
630 spin_lock_irqsave(&intf->counter_lock, flags);
631 intf->handled_local_responses++;
632 spin_unlock_irqrestore(&intf->counter_lock, flags);
634 /* No handler, so give up. */
635 spin_lock_irqsave(&intf->counter_lock, flags);
636 intf->unhandled_local_responses++;
637 spin_unlock_irqrestore(&intf->counter_lock, flags);
639 ipmi_free_recv_msg(msg);
641 ipmi_user_t user = msg->user;
642 user->handler->ipmi_recv_hndl(msg, user->handler_data);
647 deliver_err_response(struct ipmi_recv_msg *msg, int err)
649 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
650 msg->msg_data[0] = err;
651 msg->msg.netfn |= 1; /* Convert to a response. */
652 msg->msg.data_len = 1;
653 msg->msg.data = msg->msg_data;
654 deliver_response(msg);
657 /* Find the next sequence number not being used and add the given
658 message with the given timeout to the sequence table. This must be
659 called with the interface's seq_lock held. */
660 static int intf_next_seq(ipmi_smi_t intf,
661 struct ipmi_recv_msg *recv_msg,
662 unsigned long timeout,
671 for (i = intf->curr_seq;
672 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
673 i = (i+1)%IPMI_IPMB_NUM_SEQ)
675 if (!intf->seq_table[i].inuse)
679 if (!intf->seq_table[i].inuse) {
680 intf->seq_table[i].recv_msg = recv_msg;
682 /* Start with the maximum timeout, when the send response
683 comes in we will start the real timer. */
684 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
685 intf->seq_table[i].orig_timeout = timeout;
686 intf->seq_table[i].retries_left = retries;
687 intf->seq_table[i].broadcast = broadcast;
688 intf->seq_table[i].inuse = 1;
689 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
691 *seqid = intf->seq_table[i].seqid;
692 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
700 /* Return the receive message for the given sequence number and
701 release the sequence number so it can be reused. Some other data
702 is passed in to be sure the message matches up correctly (to help
703 guard against message coming in after their timeout and the
704 sequence number being reused). */
705 static int intf_find_seq(ipmi_smi_t intf,
710 struct ipmi_addr *addr,
711 struct ipmi_recv_msg **recv_msg)
716 if (seq >= IPMI_IPMB_NUM_SEQ)
719 spin_lock_irqsave(&(intf->seq_lock), flags);
720 if (intf->seq_table[seq].inuse) {
721 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
723 if ((msg->addr.channel == channel)
724 && (msg->msg.cmd == cmd)
725 && (msg->msg.netfn == netfn)
726 && (ipmi_addr_equal(addr, &(msg->addr))))
729 intf->seq_table[seq].inuse = 0;
733 spin_unlock_irqrestore(&(intf->seq_lock), flags);
739 /* Start the timer for a specific sequence table entry. */
740 static int intf_start_seq_timer(ipmi_smi_t intf,
749 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
751 spin_lock_irqsave(&(intf->seq_lock), flags);
752 /* We do this verification because the user can be deleted
753 while a message is outstanding. */
754 if ((intf->seq_table[seq].inuse)
755 && (intf->seq_table[seq].seqid == seqid))
757 struct seq_table *ent = &(intf->seq_table[seq]);
758 ent->timeout = ent->orig_timeout;
761 spin_unlock_irqrestore(&(intf->seq_lock), flags);
766 /* Got an error for the send message for a specific sequence number. */
767 static int intf_err_seq(ipmi_smi_t intf,
775 struct ipmi_recv_msg *msg = NULL;
778 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
780 spin_lock_irqsave(&(intf->seq_lock), flags);
781 /* We do this verification because the user can be deleted
782 while a message is outstanding. */
783 if ((intf->seq_table[seq].inuse)
784 && (intf->seq_table[seq].seqid == seqid))
786 struct seq_table *ent = &(intf->seq_table[seq]);
792 spin_unlock_irqrestore(&(intf->seq_lock), flags);
795 deliver_err_response(msg, err);
801 int ipmi_create_user(unsigned int if_num,
802 struct ipmi_user_hndl *handler,
807 ipmi_user_t new_user;
811 /* There is no module usecount here, because it's not
812 required. Since this can only be used by and called from
813 other modules, they will implicitly use this module, and
814 thus this can't be removed unless the other modules are
820 /* Make sure the driver is actually initialized, this handles
821 problems with initialization order. */
823 rv = ipmi_init_msghandler();
827 /* The init code doesn't return an error if it was turned
828 off, but it won't initialize. Check that. */
833 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
837 mutex_lock(&ipmi_interfaces_mutex);
838 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
839 if (intf->intf_num == if_num)
842 /* Not found, return an error */
847 /* Note that each existing user holds a refcount to the interface. */
848 kref_get(&intf->refcount);
850 kref_init(&new_user->refcount);
851 new_user->handler = handler;
852 new_user->handler_data = handler_data;
853 new_user->intf = intf;
854 new_user->gets_events = 0;
856 if (!try_module_get(intf->handlers->owner)) {
861 if (intf->handlers->inc_usecount) {
862 rv = intf->handlers->inc_usecount(intf->send_info);
864 module_put(intf->handlers->owner);
869 /* Hold the lock so intf->handlers is guaranteed to be good
871 mutex_unlock(&ipmi_interfaces_mutex);
874 spin_lock_irqsave(&intf->seq_lock, flags);
875 list_add_rcu(&new_user->link, &intf->users);
876 spin_unlock_irqrestore(&intf->seq_lock, flags);
881 kref_put(&intf->refcount, intf_free);
883 mutex_unlock(&ipmi_interfaces_mutex);
888 static void free_user(struct kref *ref)
890 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
894 int ipmi_destroy_user(ipmi_user_t user)
896 ipmi_smi_t intf = user->intf;
899 struct cmd_rcvr *rcvr;
900 struct cmd_rcvr *rcvrs = NULL;
904 /* Remove the user from the interface's sequence table. */
905 spin_lock_irqsave(&intf->seq_lock, flags);
906 list_del_rcu(&user->link);
908 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
909 if (intf->seq_table[i].inuse
910 && (intf->seq_table[i].recv_msg->user == user))
912 intf->seq_table[i].inuse = 0;
913 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
916 spin_unlock_irqrestore(&intf->seq_lock, flags);
919 * Remove the user from the command receiver's table. First
920 * we build a list of everything (not using the standard link,
921 * since other things may be using it till we do
922 * synchronize_rcu()) then free everything in that list.
924 mutex_lock(&intf->cmd_rcvrs_mutex);
925 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
926 if (rcvr->user == user) {
927 list_del_rcu(&rcvr->link);
932 mutex_unlock(&intf->cmd_rcvrs_mutex);
940 mutex_lock(&ipmi_interfaces_mutex);
941 if (intf->handlers) {
942 module_put(intf->handlers->owner);
943 if (intf->handlers->dec_usecount)
944 intf->handlers->dec_usecount(intf->send_info);
946 mutex_unlock(&ipmi_interfaces_mutex);
948 kref_put(&intf->refcount, intf_free);
950 kref_put(&user->refcount, free_user);
955 void ipmi_get_version(ipmi_user_t user,
956 unsigned char *major,
957 unsigned char *minor)
959 *major = user->intf->ipmi_version_major;
960 *minor = user->intf->ipmi_version_minor;
963 int ipmi_set_my_address(ipmi_user_t user,
964 unsigned int channel,
965 unsigned char address)
967 if (channel >= IPMI_MAX_CHANNELS)
969 user->intf->channels[channel].address = address;
973 int ipmi_get_my_address(ipmi_user_t user,
974 unsigned int channel,
975 unsigned char *address)
977 if (channel >= IPMI_MAX_CHANNELS)
979 *address = user->intf->channels[channel].address;
983 int ipmi_set_my_LUN(ipmi_user_t user,
984 unsigned int channel,
987 if (channel >= IPMI_MAX_CHANNELS)
989 user->intf->channels[channel].lun = LUN & 0x3;
993 int ipmi_get_my_LUN(ipmi_user_t user,
994 unsigned int channel,
995 unsigned char *address)
997 if (channel >= IPMI_MAX_CHANNELS)
999 *address = user->intf->channels[channel].lun;
1003 int ipmi_get_maintenance_mode(ipmi_user_t user)
1006 unsigned long flags;
1008 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1009 mode = user->intf->maintenance_mode;
1010 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1014 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1016 static void maintenance_mode_update(ipmi_smi_t intf)
1018 if (intf->handlers->set_maintenance_mode)
1019 intf->handlers->set_maintenance_mode(
1020 intf->send_info, intf->maintenance_mode_enable);
1023 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1026 unsigned long flags;
1027 ipmi_smi_t intf = user->intf;
1029 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1030 if (intf->maintenance_mode != mode) {
1032 case IPMI_MAINTENANCE_MODE_AUTO:
1033 intf->maintenance_mode = mode;
1034 intf->maintenance_mode_enable
1035 = (intf->auto_maintenance_timeout > 0);
1038 case IPMI_MAINTENANCE_MODE_OFF:
1039 intf->maintenance_mode = mode;
1040 intf->maintenance_mode_enable = 0;
1043 case IPMI_MAINTENANCE_MODE_ON:
1044 intf->maintenance_mode = mode;
1045 intf->maintenance_mode_enable = 1;
1053 maintenance_mode_update(intf);
1056 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1060 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1062 int ipmi_set_gets_events(ipmi_user_t user, int val)
1064 unsigned long flags;
1065 ipmi_smi_t intf = user->intf;
1066 struct ipmi_recv_msg *msg, *msg2;
1067 struct list_head msgs;
1069 INIT_LIST_HEAD(&msgs);
1071 spin_lock_irqsave(&intf->events_lock, flags);
1072 user->gets_events = val;
1074 if (intf->delivering_events)
1076 * Another thread is delivering events for this, so
1077 * let it handle any new events.
1081 /* Deliver any queued events. */
1082 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1083 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1084 list_move_tail(&msg->link, &msgs);
1085 intf->waiting_events_count = 0;
1087 intf->delivering_events = 1;
1088 spin_unlock_irqrestore(&intf->events_lock, flags);
1090 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1092 kref_get(&user->refcount);
1093 deliver_response(msg);
1096 spin_lock_irqsave(&intf->events_lock, flags);
1097 intf->delivering_events = 0;
1101 spin_unlock_irqrestore(&intf->events_lock, flags);
1106 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1107 unsigned char netfn,
1111 struct cmd_rcvr *rcvr;
1113 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1114 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1115 && (rcvr->chans & (1 << chan)))
1121 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1122 unsigned char netfn,
1126 struct cmd_rcvr *rcvr;
1128 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1129 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1130 && (rcvr->chans & chans))
1136 int ipmi_register_for_cmd(ipmi_user_t user,
1137 unsigned char netfn,
1141 ipmi_smi_t intf = user->intf;
1142 struct cmd_rcvr *rcvr;
1146 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1150 rcvr->netfn = netfn;
1151 rcvr->chans = chans;
1154 mutex_lock(&intf->cmd_rcvrs_mutex);
1155 /* Make sure the command/netfn is not already registered. */
1156 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1161 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1164 mutex_unlock(&intf->cmd_rcvrs_mutex);
1171 int ipmi_unregister_for_cmd(ipmi_user_t user,
1172 unsigned char netfn,
1176 ipmi_smi_t intf = user->intf;
1177 struct cmd_rcvr *rcvr;
1178 struct cmd_rcvr *rcvrs = NULL;
1179 int i, rv = -ENOENT;
1181 mutex_lock(&intf->cmd_rcvrs_mutex);
1182 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1183 if (((1 << i) & chans) == 0)
1185 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1188 if (rcvr->user == user) {
1190 rcvr->chans &= ~chans;
1191 if (rcvr->chans == 0) {
1192 list_del_rcu(&rcvr->link);
1198 mutex_unlock(&intf->cmd_rcvrs_mutex);
/* Compute the IPMB 2's-complement checksum over @size bytes at
   @data: the value that makes the byte-sum (including the checksum
   itself) equal zero mod 256. */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1219 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1220 struct kernel_ipmi_msg *msg,
1221 struct ipmi_ipmb_addr *ipmb_addr,
1223 unsigned char ipmb_seq,
1225 unsigned char source_address,
1226 unsigned char source_lun)
1230 /* Format the IPMB header data. */
1231 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1232 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1233 smi_msg->data[2] = ipmb_addr->channel;
1235 smi_msg->data[3] = 0;
1236 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1237 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1238 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1239 smi_msg->data[i+6] = source_address;
1240 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1241 smi_msg->data[i+8] = msg->cmd;
1243 /* Now tack on the data to the message. */
1244 if (msg->data_len > 0)
1245 memcpy(&(smi_msg->data[i+9]), msg->data,
1247 smi_msg->data_size = msg->data_len + 9;
1249 /* Now calculate the checksum and tack it on. */
1250 smi_msg->data[i+smi_msg->data_size]
1251 = ipmb_checksum(&(smi_msg->data[i+6]),
1252 smi_msg->data_size-6);
1254 /* Add on the checksum size and the offset from the
1256 smi_msg->data_size += 1 + i;
1258 smi_msg->msgid = msgid;
1261 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1262 struct kernel_ipmi_msg *msg,
1263 struct ipmi_lan_addr *lan_addr,
1265 unsigned char ipmb_seq,
1266 unsigned char source_lun)
1268 /* Format the IPMB header data. */
1269 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1270 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1271 smi_msg->data[2] = lan_addr->channel;
1272 smi_msg->data[3] = lan_addr->session_handle;
1273 smi_msg->data[4] = lan_addr->remote_SWID;
1274 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1275 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1276 smi_msg->data[7] = lan_addr->local_SWID;
1277 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1278 smi_msg->data[9] = msg->cmd;
1280 /* Now tack on the data to the message. */
1281 if (msg->data_len > 0)
1282 memcpy(&(smi_msg->data[10]), msg->data,
1284 smi_msg->data_size = msg->data_len + 10;
1286 /* Now calculate the checksum and tack it on. */
1287 smi_msg->data[smi_msg->data_size]
1288 = ipmb_checksum(&(smi_msg->data[7]),
1289 smi_msg->data_size-7);
1291 /* Add on the checksum size and the offset from the
1293 smi_msg->data_size += 1;
1295 smi_msg->msgid = msgid;
1298 /* Separate from ipmi_request so that the user does not have to be
1299 supplied in certain circumstances (mainly at panic time). If
1300 messages are supplied, they will be freed, even if an error
1302 static int i_ipmi_request(ipmi_user_t user,
1304 struct ipmi_addr *addr,
1306 struct kernel_ipmi_msg *msg,
1307 void *user_msg_data,
1309 struct ipmi_recv_msg *supplied_recv,
1311 unsigned char source_address,
1312 unsigned char source_lun,
1314 unsigned int retry_time_ms)
1317 struct ipmi_smi_msg *smi_msg;
1318 struct ipmi_recv_msg *recv_msg;
1319 unsigned long flags;
1320 struct ipmi_smi_handlers *handlers;
1323 if (supplied_recv) {
1324 recv_msg = supplied_recv;
1326 recv_msg = ipmi_alloc_recv_msg();
1327 if (recv_msg == NULL) {
1331 recv_msg->user_msg_data = user_msg_data;
1334 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1336 smi_msg = ipmi_alloc_smi_msg();
1337 if (smi_msg == NULL) {
1338 ipmi_free_recv_msg(recv_msg);
1344 handlers = intf->handlers;
1350 recv_msg->user = user;
1352 kref_get(&user->refcount);
1353 recv_msg->msgid = msgid;
1354 /* Store the message to send in the receive message so timeout
1355 responses can get the proper response data. */
1356 recv_msg->msg = *msg;
1358 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1359 struct ipmi_system_interface_addr *smi_addr;
1361 if (msg->netfn & 1) {
1362 /* Responses are not allowed to the SMI. */
1367 smi_addr = (struct ipmi_system_interface_addr *) addr;
1368 if (smi_addr->lun > 3) {
1369 spin_lock_irqsave(&intf->counter_lock, flags);
1370 intf->sent_invalid_commands++;
1371 spin_unlock_irqrestore(&intf->counter_lock, flags);
1376 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1378 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1379 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1380 || (msg->cmd == IPMI_GET_MSG_CMD)
1381 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1383 /* We don't let the user do these, since we manage
1384 the sequence numbers. */
1385 spin_lock_irqsave(&intf->counter_lock, flags);
1386 intf->sent_invalid_commands++;
1387 spin_unlock_irqrestore(&intf->counter_lock, flags);
1392 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1393 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1394 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1395 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1397 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1398 intf->auto_maintenance_timeout
1399 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1400 if (!intf->maintenance_mode
1401 && !intf->maintenance_mode_enable)
1403 intf->maintenance_mode_enable = 1;
1404 maintenance_mode_update(intf);
1406 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1410 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1411 spin_lock_irqsave(&intf->counter_lock, flags);
1412 intf->sent_invalid_commands++;
1413 spin_unlock_irqrestore(&intf->counter_lock, flags);
1418 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1419 smi_msg->data[1] = msg->cmd;
1420 smi_msg->msgid = msgid;
1421 smi_msg->user_data = recv_msg;
1422 if (msg->data_len > 0)
1423 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1424 smi_msg->data_size = msg->data_len + 2;
1425 spin_lock_irqsave(&intf->counter_lock, flags);
1426 intf->sent_local_commands++;
1427 spin_unlock_irqrestore(&intf->counter_lock, flags);
1428 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1429 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1431 struct ipmi_ipmb_addr *ipmb_addr;
1432 unsigned char ipmb_seq;
1436 if (addr->channel >= IPMI_MAX_CHANNELS) {
1437 spin_lock_irqsave(&intf->counter_lock, flags);
1438 intf->sent_invalid_commands++;
1439 spin_unlock_irqrestore(&intf->counter_lock, flags);
1444 if (intf->channels[addr->channel].medium
1445 != IPMI_CHANNEL_MEDIUM_IPMB)
1447 spin_lock_irqsave(&intf->counter_lock, flags);
1448 intf->sent_invalid_commands++;
1449 spin_unlock_irqrestore(&intf->counter_lock, flags);
1455 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1456 retries = 0; /* Don't retry broadcasts. */
1460 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1461 /* Broadcasts add a zero at the beginning of the
1462 message, but otherwise is the same as an IPMB
1464 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1469 /* Default to 1 second retries. */
1470 if (retry_time_ms == 0)
1471 retry_time_ms = 1000;
1473 /* 9 for the header and 1 for the checksum, plus
1474 possibly one for the broadcast. */
1475 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1476 spin_lock_irqsave(&intf->counter_lock, flags);
1477 intf->sent_invalid_commands++;
1478 spin_unlock_irqrestore(&intf->counter_lock, flags);
1483 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1484 if (ipmb_addr->lun > 3) {
1485 spin_lock_irqsave(&intf->counter_lock, flags);
1486 intf->sent_invalid_commands++;
1487 spin_unlock_irqrestore(&intf->counter_lock, flags);
1492 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1494 if (recv_msg->msg.netfn & 0x1) {
1495 /* It's a response, so use the user's sequence
1497 spin_lock_irqsave(&intf->counter_lock, flags);
1498 intf->sent_ipmb_responses++;
1499 spin_unlock_irqrestore(&intf->counter_lock, flags);
1500 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1502 source_address, source_lun);
1504 /* Save the receive message so we can use it
1505 to deliver the response. */
1506 smi_msg->user_data = recv_msg;
1508 /* It's a command, so get a sequence for it. */
1510 spin_lock_irqsave(&(intf->seq_lock), flags);
1512 spin_lock(&intf->counter_lock);
1513 intf->sent_ipmb_commands++;
1514 spin_unlock(&intf->counter_lock);
1516 /* Create a sequence number with a 1 second
1517 timeout and 4 retries. */
1518 rv = intf_next_seq(intf,
1526 /* We have used up all the sequence numbers,
1527 probably, so abort. */
1528 spin_unlock_irqrestore(&(intf->seq_lock),
1533 /* Store the sequence number in the message,
1534 so that when the send message response
1535 comes back we can start the timer. */
1536 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1537 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1538 ipmb_seq, broadcast,
1539 source_address, source_lun);
1541 /* Copy the message into the recv message data, so we
1542 can retransmit it later if necessary. */
1543 memcpy(recv_msg->msg_data, smi_msg->data,
1544 smi_msg->data_size);
1545 recv_msg->msg.data = recv_msg->msg_data;
1546 recv_msg->msg.data_len = smi_msg->data_size;
1548 /* We don't unlock until here, because we need
1549 to copy the completed message into the
1550 recv_msg before we release the lock.
1551 Otherwise, race conditions may bite us. I
1552 know that's pretty paranoid, but I prefer
1554 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1556 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1557 struct ipmi_lan_addr *lan_addr;
1558 unsigned char ipmb_seq;
1561 if (addr->channel >= IPMI_MAX_CHANNELS) {
1562 spin_lock_irqsave(&intf->counter_lock, flags);
1563 intf->sent_invalid_commands++;
1564 spin_unlock_irqrestore(&intf->counter_lock, flags);
1569 if ((intf->channels[addr->channel].medium
1570 != IPMI_CHANNEL_MEDIUM_8023LAN)
1571 && (intf->channels[addr->channel].medium
1572 != IPMI_CHANNEL_MEDIUM_ASYNC))
1574 spin_lock_irqsave(&intf->counter_lock, flags);
1575 intf->sent_invalid_commands++;
1576 spin_unlock_irqrestore(&intf->counter_lock, flags);
1583 /* Default to 1 second retries. */
1584 if (retry_time_ms == 0)
1585 retry_time_ms = 1000;
1587 /* 11 for the header and 1 for the checksum. */
1588 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1589 spin_lock_irqsave(&intf->counter_lock, flags);
1590 intf->sent_invalid_commands++;
1591 spin_unlock_irqrestore(&intf->counter_lock, flags);
1596 lan_addr = (struct ipmi_lan_addr *) addr;
1597 if (lan_addr->lun > 3) {
1598 spin_lock_irqsave(&intf->counter_lock, flags);
1599 intf->sent_invalid_commands++;
1600 spin_unlock_irqrestore(&intf->counter_lock, flags);
1605 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1607 if (recv_msg->msg.netfn & 0x1) {
1608 /* It's a response, so use the user's sequence
1610 spin_lock_irqsave(&intf->counter_lock, flags);
1611 intf->sent_lan_responses++;
1612 spin_unlock_irqrestore(&intf->counter_lock, flags);
1613 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1616 /* Save the receive message so we can use it
1617 to deliver the response. */
1618 smi_msg->user_data = recv_msg;
1620 /* It's a command, so get a sequence for it. */
1622 spin_lock_irqsave(&(intf->seq_lock), flags);
1624 spin_lock(&intf->counter_lock);
1625 intf->sent_lan_commands++;
1626 spin_unlock(&intf->counter_lock);
1628 /* Create a sequence number with a 1 second
1629 timeout and 4 retries. */
1630 rv = intf_next_seq(intf,
1638 /* We have used up all the sequence numbers,
1639 probably, so abort. */
1640 spin_unlock_irqrestore(&(intf->seq_lock),
1645 /* Store the sequence number in the message,
1646 so that when the send message response
1647 comes back we can start the timer. */
1648 format_lan_msg(smi_msg, msg, lan_addr,
1649 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1650 ipmb_seq, source_lun);
1652 /* Copy the message into the recv message data, so we
1653 can retransmit it later if necessary. */
1654 memcpy(recv_msg->msg_data, smi_msg->data,
1655 smi_msg->data_size);
1656 recv_msg->msg.data = recv_msg->msg_data;
1657 recv_msg->msg.data_len = smi_msg->data_size;
1659 /* We don't unlock until here, because we need
1660 to copy the completed message into the
1661 recv_msg before we release the lock.
1662 Otherwise, race conditions may bite us. I
1663 know that's pretty paranoid, but I prefer
1665 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1668 /* Unknown address type. */
1669 spin_lock_irqsave(&intf->counter_lock, flags);
1670 intf->sent_invalid_commands++;
1671 spin_unlock_irqrestore(&intf->counter_lock, flags);
1679 for (m = 0; m < smi_msg->data_size; m++)
1680 printk(" %2.2x", smi_msg->data[m]);
1685 handlers->sender(intf->send_info, smi_msg, priority);
1692 ipmi_free_smi_msg(smi_msg);
1693 ipmi_free_recv_msg(recv_msg);
/*
 * check_addr - validate an address' channel and look up the source
 * slave address and LUN configured for that channel.
 * NOTE(review): sampled listing; the error-return and closing lines are
 * missing from this view.  Comments only.
 */
1697 static int check_addr(ipmi_smi_t       intf,
1698 		      struct ipmi_addr *addr,
1699 		      unsigned char    *saddr,
1702 	if (addr->channel >= IPMI_MAX_CHANNELS)
1704 	*lun = intf->channels[addr->channel].lun;
1705 	*saddr = intf->channels[addr->channel].address;
/*
 * ipmi_request_settime - public request entry point that lets the caller
 * specify retry timing; validates the address then delegates to
 * i_ipmi_request() with the channel's source address/LUN.
 * NOTE(review): sampled listing; several parameters and the argument list
 * of the i_ipmi_request() call are missing from this view.
 */
1709 int ipmi_request_settime(ipmi_user_t      user,
1710 			 struct ipmi_addr *addr,
1712 			 struct kernel_ipmi_msg *msg,
1713 			 void             *user_msg_data,
1716 			 unsigned int     retry_time_ms)
1718 	unsigned char saddr, lun;
1723 	rv = check_addr(user->intf, addr, &saddr, &lun);
1726 	return i_ipmi_request(user,
/*
 * ipmi_request_supply_msgs - public request entry point where the caller
 * supplies pre-allocated message buffers (useful at panic time, when
 * allocation is not possible); validates the address then delegates to
 * i_ipmi_request().
 * NOTE(review): sampled listing; parameters and the delegated argument
 * list are missing from this view.
 */
1740 int ipmi_request_supply_msgs(ipmi_user_t          user,
1741 			     struct ipmi_addr     *addr,
1743 			     struct kernel_ipmi_msg *msg,
1744 			     void                 *user_msg_data,
1746 			     struct ipmi_recv_msg *supplied_recv,
1749 	unsigned char saddr, lun;
1754 	rv = check_addr(user->intf, addr, &saddr, &lun);
1757 	return i_ipmi_request(user,
1771 #ifdef CONFIG_PROC_FS
/*
 * ipmb_file_read_proc - /proc read handler: prints the slave address of
 * every channel as hex, space-separated, newline-terminated.
 * (Legacy read_proc interface; start/off/count/eof handling not visible
 * in this sampled extraction.)
 */
1772 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1773 			       int count, int *eof, void *data)
1775 	char       *out = (char *) page;
1776 	ipmi_smi_t intf = data;
1780 	for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1781 		rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1782 	out[rv-1] = '\n'; /* Replace the final space with a newline */
/*
 * version_file_read_proc - /proc read handler: prints the BMC's IPMI
 * version as "major.minor".
 */
1788 static int version_file_read_proc(char *page, char **start, off_t off,
1789 				  int count, int *eof, void *data)
1791 	char       *out = (char *) page;
1792 	ipmi_smi_t intf = data;
1794 	return sprintf(out, "%d.%d\n",
1795 		       ipmi_version_major(&intf->bmc->id),
1796 		       ipmi_version_minor(&intf->bmc->id));
/*
 * stat_file_read_proc - /proc read handler: dumps every per-interface
 * message counter, one "name: value" pair per line.  Returns the number
 * of bytes written (out - page).
 * NOTE(review): the counters are read without taking counter_lock here;
 * presumably acceptable for a diagnostic dump — confirm against full source.
 */
1799 static int stat_file_read_proc(char *page, char **start, off_t off,
1800 			       int count, int *eof, void *data)
1802 	char       *out = (char *) page;
1803 	ipmi_smi_t intf = data;
1805 	out += sprintf(out, "sent_invalid_commands:       %d\n",
1806 		       intf->sent_invalid_commands);
1807 	out += sprintf(out, "sent_local_commands:         %d\n",
1808 		       intf->sent_local_commands);
1809 	out += sprintf(out, "handled_local_responses:     %d\n",
1810 		       intf->handled_local_responses);
1811 	out += sprintf(out, "unhandled_local_responses:   %d\n",
1812 		       intf->unhandled_local_responses);
1813 	out += sprintf(out, "sent_ipmb_commands:          %d\n",
1814 		       intf->sent_ipmb_commands);
1815 	out += sprintf(out, "sent_ipmb_command_errs:      %d\n",
1816 		       intf->sent_ipmb_command_errs);
1817 	out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1818 		       intf->retransmitted_ipmb_commands);
1819 	out += sprintf(out, "timed_out_ipmb_commands:     %d\n",
1820 		       intf->timed_out_ipmb_commands);
1821 	out += sprintf(out, "timed_out_ipmb_broadcasts:   %d\n",
1822 		       intf->timed_out_ipmb_broadcasts);
1823 	out += sprintf(out, "sent_ipmb_responses:         %d\n",
1824 		       intf->sent_ipmb_responses);
1825 	out += sprintf(out, "handled_ipmb_responses:      %d\n",
1826 		       intf->handled_ipmb_responses);
1827 	out += sprintf(out, "invalid_ipmb_responses:      %d\n",
1828 		       intf->invalid_ipmb_responses);
1829 	out += sprintf(out, "unhandled_ipmb_responses:    %d\n",
1830 		       intf->unhandled_ipmb_responses);
1831 	out += sprintf(out, "sent_lan_commands:           %d\n",
1832 		       intf->sent_lan_commands);
1833 	out += sprintf(out, "sent_lan_command_errs:       %d\n",
1834 		       intf->sent_lan_command_errs);
1835 	out += sprintf(out, "retransmitted_lan_commands:  %d\n",
1836 		       intf->retransmitted_lan_commands);
1837 	out += sprintf(out, "timed_out_lan_commands:      %d\n",
1838 		       intf->timed_out_lan_commands);
1839 	out += sprintf(out, "sent_lan_responses:          %d\n",
1840 		       intf->sent_lan_responses);
1841 	out += sprintf(out, "handled_lan_responses:       %d\n",
1842 		       intf->handled_lan_responses);
1843 	out += sprintf(out, "invalid_lan_responses:       %d\n",
1844 		       intf->invalid_lan_responses);
1845 	out += sprintf(out, "unhandled_lan_responses:     %d\n",
1846 		       intf->unhandled_lan_responses);
1847 	out += sprintf(out, "handled_commands:            %d\n",
1848 		       intf->handled_commands);
1849 	out += sprintf(out, "invalid_commands:            %d\n",
1850 		       intf->invalid_commands);
1851 	out += sprintf(out, "unhandled_commands:          %d\n",
1852 		       intf->unhandled_commands);
1853 	out += sprintf(out, "invalid_events:              %d\n",
1854 		       intf->invalid_events);
1855 	out += sprintf(out, "events:                      %d\n",
1858 	return (out - ((char *) page));
1860 #endif /* CONFIG_PROC_FS */
/*
 * ipmi_smi_add_proc_entry - create a /proc file under the interface's
 * proc directory and remember it on smi->proc_entries (under
 * proc_entry_lock) so remove_proc_entries() can tear it down later.
 * NOTE(review): sampled listing — allocation-failure cleanup paths and
 * the return value lines are missing from this view.
 */
1862 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1863 			    read_proc_t *read_proc, write_proc_t *write_proc,
1864 			    void *data, struct module *owner)
1867 #ifdef CONFIG_PROC_FS
1868 	struct proc_dir_entry  *file;
1869 	struct ipmi_proc_entry *entry;
1871 	/* Create a list element. */
1872 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1875 	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1880 	strcpy(entry->name, name);
1882 	file = create_proc_entry(name, 0, smi->proc_dir);
1889 		file->read_proc = read_proc;
1890 		file->write_proc = write_proc;
1891 		file->owner = owner;
1893 		mutex_lock(&smi->proc_entry_lock);
1894 		/* Stick it on the list. */
1895 		entry->next = smi->proc_entries;
1896 		smi->proc_entries = entry;
1897 		mutex_unlock(&smi->proc_entry_lock);
1899 #endif /* CONFIG_PROC_FS */
/*
 * add_proc_entries - create the per-interface /proc directory (named by
 * interface number) and its "stats", "ipmb" and "version" files.
 * NOTE(review): sampled listing — the rv error checks between calls are
 * missing from this view.
 */
1904 static int add_proc_entries(ipmi_smi_t smi, int num)
1908 #ifdef CONFIG_PROC_FS
1909 	sprintf(smi->proc_dir_name, "%d", num);
1910 	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1914 		smi->proc_dir->owner = THIS_MODULE;
1918 		rv = ipmi_smi_add_proc_entry(smi, "stats",
1919 					     stat_file_read_proc, NULL,
1923 		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1924 					     ipmb_file_read_proc, NULL,
1928 		rv = ipmi_smi_add_proc_entry(smi, "version",
1929 					     version_file_read_proc, NULL,
1931 #endif /* CONFIG_PROC_FS */
/*
 * remove_proc_entries - tear down every /proc file registered via
 * ipmi_smi_add_proc_entry() (popping the smi->proc_entries list under
 * proc_entry_lock), then remove the interface's proc directory itself.
 * NOTE(review): the kfree() of entry->name/entry is presumably in the
 * lines elided by this sampled extraction — confirm in the full source.
 */
1936 static void remove_proc_entries(ipmi_smi_t smi)
1938 #ifdef CONFIG_PROC_FS
1939 	struct ipmi_proc_entry *entry;
1941 	mutex_lock(&smi->proc_entry_lock);
1942 	while (smi->proc_entries) {
1943 		entry = smi->proc_entries;
1944 		smi->proc_entries = entry->next;
1946 		remove_proc_entry(entry->name, smi->proc_dir);
1950 	mutex_unlock(&smi->proc_entry_lock);
1951 	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1952 #endif /* CONFIG_PROC_FS */
/* driver_find_device() match callback: true when the BMC bound to this
 * device has the 16-byte GUID passed in data. */
1955 static int __find_bmc_guid(struct device *dev, void *data)
1957 	unsigned char *id = data;
1958 	struct bmc_device *bmc = dev_get_drvdata(dev);
1959 	return memcmp(bmc->guid, id, 16) == 0;
/* Look up an already-registered bmc_device by GUID; returns its drvdata
 * on a hit (the NULL-miss return path is elided in this extraction). */
1962 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1963 					     unsigned char *guid)
1967 	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1969 		return dev_get_drvdata(dev);
/* Search key for matching a BMC by (product_id, device_id) pair. */
1974 struct prod_dev_id {
1975 	unsigned int  product_id;
1976 	unsigned char device_id;
/* driver_find_device() match callback: true when the BMC's product id
 * and device id both equal the prod_dev_id key in data. */
1979 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1981 	struct prod_dev_id *id = data;
1982 	struct bmc_device *bmc = dev_get_drvdata(dev);
1984 	return (bmc->id.product_id == id->product_id
1985 		&& bmc->id.device_id == id->device_id);
/* Look up an already-registered bmc_device by (product_id, device_id);
 * returns its drvdata on a hit (NULL-miss path elided in this extraction). */
1988 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1989 	struct device_driver *drv,
1990 	unsigned int product_id, unsigned char device_id)
1992 	struct prod_dev_id id = {
1993 		.product_id = product_id,
1994 		.device_id = device_id,
1998 	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2000 		return dev_get_drvdata(dev);
/* sysfs show: BMC device id as a decimal integer. */
2005 static ssize_t device_id_show(struct device *dev,
2006 			      struct device_attribute *attr,
2009 	struct bmc_device *bmc = dev_get_drvdata(dev);
2011 	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
/* sysfs show: bit 7 of device_revision = "provides device SDRs" flag. */
2014 static ssize_t provides_dev_sdrs_show(struct device *dev,
2015 				      struct device_attribute *attr,
2018 	struct bmc_device *bmc = dev_get_drvdata(dev);
2020 	return snprintf(buf, 10, "%u\n",
2021 			(bmc->id.device_revision & 0x80) >> 7);
/* sysfs show: low nibble of device_revision = hardware revision. */
2024 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2027 	struct bmc_device *bmc = dev_get_drvdata(dev);
2029 	return snprintf(buf, 20, "%u\n",
2030 			bmc->id.device_revision & 0x0F);
/* sysfs show: firmware revision as "major.minor" (minor printed in hex,
 * matching IPMI's BCD encoding of firmware_revision_2). */
2033 static ssize_t firmware_rev_show(struct device *dev,
2034 				 struct device_attribute *attr,
2037 	struct bmc_device *bmc = dev_get_drvdata(dev);
2039 	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2040 			bmc->id.firmware_revision_2);
/* sysfs show: supported IPMI spec version as "major.minor". */
2043 static ssize_t ipmi_version_show(struct device *dev,
2044 				 struct device_attribute *attr,
2047 	struct bmc_device *bmc = dev_get_drvdata(dev);
2049 	return snprintf(buf, 20, "%u.%u\n",
2050 			ipmi_version_major(&bmc->id),
2051 			ipmi_version_minor(&bmc->id));
/* sysfs show: "additional device support" capability byte, in hex. */
2054 static ssize_t add_dev_support_show(struct device *dev,
2055 				    struct device_attribute *attr,
2058 	struct bmc_device *bmc = dev_get_drvdata(dev);
2060 	return snprintf(buf, 10, "0x%02x\n",
2061 			bmc->id.additional_device_support);
/* sysfs show: IANA manufacturer id, zero-padded 6-digit hex. */
2064 static ssize_t manufacturer_id_show(struct device *dev,
2065 				    struct device_attribute *attr,
2068 	struct bmc_device *bmc = dev_get_drvdata(dev);
2070 	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/* sysfs show: product id, zero-padded 4-digit hex. */
2073 static ssize_t product_id_show(struct device *dev,
2074 			       struct device_attribute *attr,
2077 	struct bmc_device *bmc = dev_get_drvdata(dev);
2079 	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/* sysfs show: the 4 auxiliary firmware revision bytes, printed in
 * reverse array order (byte 3 first). */
2082 static ssize_t aux_firmware_rev_show(struct device *dev,
2083 				     struct device_attribute *attr,
2086 	struct bmc_device *bmc = dev_get_drvdata(dev);
2088 	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2089 			bmc->id.aux_firmware_revision[3],
2090 			bmc->id.aux_firmware_revision[2],
2091 			bmc->id.aux_firmware_revision[1],
2092 			bmc->id.aux_firmware_revision[0]);
/* sysfs show: BMC GUID.  NOTE(review): only guid[0] and guid[8] are
 * printed as two long longs — presumably the elided lines of the full
 * source cast 8-byte halves of the array; this view prints only two
 * single bytes.  Confirm against the full source before relying on it. */
2095 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2098 	struct bmc_device *bmc = dev_get_drvdata(dev);
2100 	return snprintf(buf, 100, "%Lx%Lx\n",
2101 			(long long) bmc->guid[0],
2102 			(long long) bmc->guid[8]);
/*
 * remove_files - remove every sysfs attribute file created by
 * create_files(), mirroring its order; the aux_firmware and guid files
 * are conditional because they are only created when the corresponding
 * data was reported by the BMC.
 */
2105 static void remove_files(struct bmc_device *bmc)
2110 	device_remove_file(&bmc->dev->dev,
2111 			   &bmc->device_id_attr);
2112 	device_remove_file(&bmc->dev->dev,
2113 			   &bmc->provides_dev_sdrs_attr);
2114 	device_remove_file(&bmc->dev->dev,
2115 			   &bmc->revision_attr);
2116 	device_remove_file(&bmc->dev->dev,
2117 			   &bmc->firmware_rev_attr);
2118 	device_remove_file(&bmc->dev->dev,
2119 			   &bmc->version_attr);
2120 	device_remove_file(&bmc->dev->dev,
2121 			   &bmc->add_dev_support_attr);
2122 	device_remove_file(&bmc->dev->dev,
2123 			   &bmc->manufacturer_id_attr);
2124 	device_remove_file(&bmc->dev->dev,
2125 			   &bmc->product_id_attr);
2127 	if (bmc->id.aux_firmware_revision_set)
2128 		device_remove_file(&bmc->dev->dev,
2129 				   &bmc->aux_firmware_rev_attr);
2131 		device_remove_file(&bmc->dev->dev,
/* kref release callback for a bmc_device: unregisters the platform
 * device once the last reference is dropped.  (The remove_files() call
 * and kfree are presumably in lines elided by this extraction.) */
2136 cleanup_bmc_device(struct kref *ref)
2138 	struct bmc_device *bmc;
2140 	bmc = container_of(ref, struct bmc_device, refcount);
2143 	platform_device_unregister(bmc->dev);
/*
 * ipmi_bmc_unregister - undo ipmi_bmc_register(): remove both sysfs
 * symlinks (interface->bmc and bmc->interface), free their names, and
 * drop the interface's reference on the bmc_device under
 * ipmidriver_mutex (last ref triggers cleanup_bmc_device()).
 */
2147 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2149 	struct bmc_device *bmc = intf->bmc;
2151 	if (intf->sysfs_name) {
2152 		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2153 		kfree(intf->sysfs_name);
2154 		intf->sysfs_name = NULL;
2156 	if (intf->my_dev_name) {
2157 		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2158 		kfree(intf->my_dev_name);
2159 		intf->my_dev_name = NULL;
2162 	mutex_lock(&ipmidriver_mutex);
2163 	kref_put(&bmc->refcount, cleanup_bmc_device);
2165 	mutex_unlock(&ipmidriver_mutex);
/*
 * create_files - initialize all per-BMC sysfs device_attribute structs
 * (name, mode, show handler) and create the files, unwinding in reverse
 * order via the goto-cleanup ladder on any failure.  aux_firmware and
 * guid files are created only when the BMC reported that data.
 * NOTE(review): sampled listing — the error-label lines (out_devid etc.)
 * between cleanup calls are missing from this view.
 */
2168 static int create_files(struct bmc_device *bmc)
2172 	bmc->device_id_attr.attr.name = "device_id";
2173 	bmc->device_id_attr.attr.mode = S_IRUGO;
2174 	bmc->device_id_attr.show = device_id_show;
2176 	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2177 	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2178 	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2180 	bmc->revision_attr.attr.name = "revision";
2181 	bmc->revision_attr.attr.mode = S_IRUGO;
2182 	bmc->revision_attr.show = revision_show;
2184 	bmc->firmware_rev_attr.attr.name = "firmware_revision";
2185 	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2186 	bmc->firmware_rev_attr.show = firmware_rev_show;
2188 	bmc->version_attr.attr.name = "ipmi_version";
2189 	bmc->version_attr.attr.mode = S_IRUGO;
2190 	bmc->version_attr.show = ipmi_version_show;
2192 	bmc->add_dev_support_attr.attr.name = "additional_device_support";
2193 	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2194 	bmc->add_dev_support_attr.show = add_dev_support_show;
2196 	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2197 	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2198 	bmc->manufacturer_id_attr.show = manufacturer_id_show;
2200 	bmc->product_id_attr.attr.name = "product_id";
2201 	bmc->product_id_attr.attr.mode = S_IRUGO;
2202 	bmc->product_id_attr.show = product_id_show;
2204 	bmc->guid_attr.attr.name = "guid";
2205 	bmc->guid_attr.attr.mode = S_IRUGO;
2206 	bmc->guid_attr.show = guid_show;
2208 	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2209 	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2210 	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
/* File creation, unwound in reverse on error. */
2212 	err = device_create_file(&bmc->dev->dev,
2213 			   &bmc->device_id_attr);
2215 	err = device_create_file(&bmc->dev->dev,
2216 			   &bmc->provides_dev_sdrs_attr);
2217 	if (err) goto out_devid;
2218 	err = device_create_file(&bmc->dev->dev,
2219 			   &bmc->revision_attr);
2220 	if (err) goto out_sdrs;
2221 	err = device_create_file(&bmc->dev->dev,
2222 			   &bmc->firmware_rev_attr);
2223 	if (err) goto out_rev;
2224 	err = device_create_file(&bmc->dev->dev,
2225 			   &bmc->version_attr);
2226 	if (err) goto out_firm;
2227 	err = device_create_file(&bmc->dev->dev,
2228 			   &bmc->add_dev_support_attr);
2229 	if (err) goto out_version;
2230 	err = device_create_file(&bmc->dev->dev,
2231 			   &bmc->manufacturer_id_attr);
2232 	if (err) goto out_add_dev;
2233 	err = device_create_file(&bmc->dev->dev,
2234 			   &bmc->product_id_attr);
2235 	if (err) goto out_manu;
2236 	if (bmc->id.aux_firmware_revision_set) {
2237 		err = device_create_file(&bmc->dev->dev,
2238 				   &bmc->aux_firmware_rev_attr);
2239 		if (err) goto out_prod_id;
2241 	if (bmc->guid_set) {
2242 		err = device_create_file(&bmc->dev->dev,
2244 		if (err) goto out_aux_firm;
/* Error unwind ladder (labels elided in this extraction). */
2250 	if (bmc->id.aux_firmware_revision_set)
2251 		device_remove_file(&bmc->dev->dev,
2252 				   &bmc->aux_firmware_rev_attr);
2254 	device_remove_file(&bmc->dev->dev,
2255 			   &bmc->product_id_attr);
2257 	device_remove_file(&bmc->dev->dev,
2258 			   &bmc->manufacturer_id_attr);
2260 	device_remove_file(&bmc->dev->dev,
2261 			   &bmc->add_dev_support_attr);
2263 	device_remove_file(&bmc->dev->dev,
2264 			   &bmc->version_attr);
2266 	device_remove_file(&bmc->dev->dev,
2267 			   &bmc->firmware_rev_attr);
2269 	device_remove_file(&bmc->dev->dev,
2270 			   &bmc->revision_attr);
2272 	device_remove_file(&bmc->dev->dev,
2273 			   &bmc->provides_dev_sdrs_attr);
2275 	device_remove_file(&bmc->dev->dev,
2276 			   &bmc->device_id_attr);
/*
 * ipmi_bmc_register - bind an interface to a bmc_device.  If a BMC with
 * the same GUID or (product_id, device_id) already exists, share it
 * (kref_get); otherwise allocate and register a new platform device,
 * create its sysfs files, and build symlinks between the system
 * interface device and the BMC device.
 * NOTE(review): sampled listing — many error returns, the out_err label,
 * and parts of the duplicate-id workaround loop are missing here.
 */
2281 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2282 			     const char *sysfs_name)
2285 	struct bmc_device *bmc = intf->bmc;
2286 	struct bmc_device *old_bmc;
2290 	mutex_lock(&ipmidriver_mutex);
2293 	 * Try to find if there is an bmc_device struct
2294 	 * representing the interfaced BMC already
2297 		old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2299 		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2304 	 * If there is already an bmc_device, free the new one,
2305 	 * otherwise register the new BMC device
2309 		intf->bmc = old_bmc;
2312 		kref_get(&bmc->refcount);
2313 		mutex_unlock(&ipmidriver_mutex);
2316 		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2317 		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2318 		       bmc->id.manufacturer_id,
/* New BMC: work around firmware that reports duplicate ids by bumping
   device_id until a free (prod, dev) pair is found. */
2323 		unsigned char orig_dev_id = bmc->id.device_id;
2324 		int warn_printed = 0;
2326 		snprintf(name, sizeof(name),
2327 			 "ipmi_bmc.%4.4x", bmc->id.product_id);
2329 		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2331 						 bmc->id.device_id)) {
2332 			if (!warn_printed) {
2333 				printk(KERN_WARNING PFX
2334 				       "This machine has two different BMCs"
2335 				       " with the same product id and device"
2336 				       " id.  This is an error in the"
2337 				       " firmware, but incrementing the"
2338 				       " device id to work around the problem."
2339 				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
2340 				       bmc->id.product_id, bmc->id.device_id);
2343 			bmc->id.device_id++; /* Wraps at 255 */
2344 			if (bmc->id.device_id == orig_dev_id) {
2346 				       "Out of device ids!\n");
2351 		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2353 			mutex_unlock(&ipmidriver_mutex);
2356 			       " Unable to allocate platform device\n");
2359 		bmc->dev->dev.driver = &ipmidriver;
2360 		dev_set_drvdata(&bmc->dev->dev, bmc);
2361 		kref_init(&bmc->refcount);
2363 		rv = platform_device_add(bmc->dev);
2364 		mutex_unlock(&ipmidriver_mutex);
2366 			platform_device_put(bmc->dev);
2370 			       " Unable to register bmc device: %d\n",
2372 			/* Don't go to out_err, you can only do that if
2373 			   the device is registered already. */
2377 		rv = create_files(bmc);
2379 			mutex_lock(&ipmidriver_mutex);
2380 			platform_device_unregister(bmc->dev);
2381 			mutex_unlock(&ipmidriver_mutex);
2387 		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
2388 		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2389 		       bmc->id.manufacturer_id,
2395 	 * create symlink from system interface device to bmc device
2398 	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2399 	if (!intf->sysfs_name) {
2402 		       "ipmi_msghandler: allocate link to BMC: %d\n",
2407 	rv = sysfs_create_link(&intf->si_dev->kobj,
2408 			       &bmc->dev->dev.kobj, intf->sysfs_name);
2410 		kfree(intf->sysfs_name);
2411 		intf->sysfs_name = NULL;
2413 		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
/* Reverse link: bmc -> "ipmiN".  snprintf with size 0 measures the name. */
2418 	size = snprintf(dummy, 0, "ipmi%d", ifnum);
2419 	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2420 	if (!intf->my_dev_name) {
2421 		kfree(intf->sysfs_name);
2422 		intf->sysfs_name = NULL;
2425 		       "ipmi_msghandler: allocate link from BMC: %d\n",
2429 	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2431 	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2434 		kfree(intf->sysfs_name);
2435 		intf->sysfs_name = NULL;
2436 		kfree(intf->my_dev_name);
2437 		intf->my_dev_name = NULL;
2440 		       " Unable to create symlink to bmc: %d\n",
2448 	ipmi_bmc_unregister(intf);
/*
 * send_guid_cmd - issue Get Device GUID to the BMC over the system
 * interface as the internal NULL user (responses land in
 * intf->null_user_handler, i.e. guid_handler()).
 * NOTE(review): sampled listing — msg.data/data_len setup and most
 * i_ipmi_request() arguments are missing from this view.
 */
2453 send_guid_cmd(ipmi_smi_t intf, int chan)
2455 	struct kernel_ipmi_msg            msg;
2456 	struct ipmi_system_interface_addr si;
2458 	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2459 	si.channel = IPMI_BMC_CHANNEL;
2462 	msg.netfn = IPMI_NETFN_APP_REQUEST;
2463 	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2466 	return i_ipmi_request(NULL,
2468 			      (struct ipmi_addr *) &si,
2475 			      intf->channels[0].address,
2476 			      intf->channels[0].lun,
/*
 * guid_handler - NULL-user response handler for Get Device GUID.
 * Ignores unrelated messages; on an error completion code or a short
 * response marks the GUID unavailable; otherwise stores the 16-byte
 * GUID.  Always ends by waking the waiter in get_guid().
 */
2481 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2483 	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2484 	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2485 	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2489 	if (msg->msg.data[0] != 0) {
2490 		/* Error from getting the GUID, the BMC doesn't have one. */
2491 		intf->bmc->guid_set = 0;
/* 17 = completion code byte + 16 GUID bytes. */
2495 	if (msg->msg.data_len < 17) {
2496 		intf->bmc->guid_set = 0;
2497 		printk(KERN_WARNING PFX
2498 		       "guid_handler: The GUID response from the BMC was too"
2499 		       " short, it was %d but should have been 17.  Assuming"
2500 		       " GUID is not available.\n",
2505 	memcpy(intf->bmc->guid, msg->msg.data, 16);
2506 	intf->bmc->guid_set = 1;
2508 	wake_up(&intf->waitq);
/*
 * get_guid - synchronously fetch the BMC GUID: mark guid_set with the
 * sentinel 2 ("in progress"), install guid_handler as the NULL-user
 * handler, send the command, and block until the handler clears the
 * sentinel (set to 0 immediately if the send itself fails).
 */
2512 get_guid(ipmi_smi_t intf)
2516 	intf->bmc->guid_set = 0x2;
2517 	intf->null_user_handler = guid_handler;
2518 	rv = send_guid_cmd(intf, 0);
2520 		/* Send failed, no GUID available. */
2521 		intf->bmc->guid_set = 0;
2522 	wait_event(intf->waitq, intf->bmc->guid_set != 2);
2523 	intf->null_user_handler = NULL;
/*
 * send_channel_info_cmd - issue Get Channel Info for one channel over
 * the system interface as the NULL user (responses are handled by
 * channel_handler()).
 * NOTE(review): sampled listing — data[0]=chan assignment and most
 * i_ipmi_request() arguments are missing from this view.
 */
2527 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2529 	struct kernel_ipmi_msg            msg;
2530 	unsigned char                     data[1];
2531 	struct ipmi_system_interface_addr si;
2533 	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2534 	si.channel = IPMI_BMC_CHANNEL;
2537 	msg.netfn = IPMI_NETFN_APP_REQUEST;
2538 	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2542 	return i_ipmi_request(NULL,
2544 			      (struct ipmi_addr *) &si,
2551 			      intf->channels[0].address,
2552 			      intf->channels[0].lun,
/*
 * channel_handler - NULL-user response handler driving the channel scan
 * started by ipmi_register_smi().  Records each channel's medium and
 * protocol, then requests the next channel; on completion or any
 * unrecoverable error sets curr_channel to IPMI_MAX_CHANNELS and wakes
 * the waiter.  An INVALID_COMMAND error means the MC does not support
 * Get Channel Info, so channel 0 is assumed to be a plain IPMB channel.
 */
2557 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2562 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2563 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2564 	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2566 		/* It's the one we want */
2567 		if (msg->msg.data[0] != 0) {
2568 			/* Got an error from the channel, just go on. */
2570 			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2571 				/* If the MC does not support this
2572 				   command, that is legal.  We just
2573 				   assume it has one IPMB at channel
2575 				intf->channels[0].medium
2576 					= IPMI_CHANNEL_MEDIUM_IPMB;
2577 				intf->channels[0].protocol
2578 					= IPMI_CHANNEL_PROTOCOL_IPMB;
2581 				intf->curr_channel = IPMI_MAX_CHANNELS;
2582 				wake_up(&intf->waitq);
2587 		if (msg->msg.data_len < 4) {
2588 			/* Message not big enough, just go on. */
2591 		chan = intf->curr_channel;
2592 		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2593 		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
/* Advance the scan; finish when every channel has been queried. */
2596 		intf->curr_channel++;
2597 		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2598 			wake_up(&intf->waitq);
2600 			rv = send_channel_info_cmd(intf, intf->curr_channel);
2603 			/* Got an error somehow, just give up. */
2604 			intf->curr_channel = IPMI_MAX_CHANNELS;
2605 			wake_up(&intf->waitq);
2607 			printk(KERN_WARNING PFX
2608 			       "Error sending channel information: %d\n",
/* ipmi_poll_interface - poll the low-level driver for pending messages,
 * if it provides a poll handler. */
2616 void ipmi_poll_interface(ipmi_user_t user)
2618 	ipmi_smi_t intf = user->intf;
2620 	if (intf->handlers->poll)
2621 		intf->handlers->poll(intf->send_info);
/*
 * ipmi_register_smi - register a new low-level IPMI interface: allocate
 * and initialize the ipmi_smi structure (locks, lists, per-channel
 * defaults), insert it into ipmi_interfaces at the lowest free number,
 * start the driver, scan channels (IPMI >= 1.5) or assume a single IPMB
 * channel, create /proc entries, register the BMC device, and finally
 * notify SMI watchers.
 * NOTE(review): sampled listing — error returns, the GUID fetch, the
 * intf_num assignment and its memory barrier, and several goto targets
 * are missing from this view.
 */
2624 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2626 		      struct ipmi_device_id    *device_id,
2627 		      struct device            *si_dev,
2628 		      const char               *sysfs_name,
2629 		      unsigned char            slave_addr)
2635 	struct list_head *link;
2637 	/* Make sure the driver is actually initialized, this handles
2638 	   problems with initialization order. */
2640 		rv = ipmi_init_msghandler();
2643 		/* The init code doesn't return an error if it was turned
2644 		   off, but it won't initialize.  Check that. */
2649 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2653 	intf->ipmi_version_major = ipmi_version_major(device_id);
2654 	intf->ipmi_version_minor = ipmi_version_minor(device_id);
2656 	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2661 	intf->intf_num = -1; /* Mark it invalid for now. */
2662 	kref_init(&intf->refcount);
2663 	intf->bmc->id = *device_id;
2664 	intf->si_dev = si_dev;
2665 	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2666 		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2667 		intf->channels[j].lun = 2;
2669 	if (slave_addr != 0)
2670 		intf->channels[0].address = slave_addr;
2671 	INIT_LIST_HEAD(&intf->users);
2672 	intf->handlers = handlers;
2673 	intf->send_info = send_info;
2674 	spin_lock_init(&intf->seq_lock);
2675 	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2676 		intf->seq_table[j].inuse = 0;
2677 		intf->seq_table[j].seqid = 0;
2680 #ifdef CONFIG_PROC_FS
2681 	mutex_init(&intf->proc_entry_lock);
2683 	spin_lock_init(&intf->waiting_msgs_lock);
2684 	INIT_LIST_HEAD(&intf->waiting_msgs);
2685 	spin_lock_init(&intf->events_lock);
2686 	INIT_LIST_HEAD(&intf->waiting_events);
2687 	intf->waiting_events_count = 0;
2688 	mutex_init(&intf->cmd_rcvrs_mutex);
2689 	spin_lock_init(&intf->maintenance_mode_lock);
2690 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
2691 	init_waitqueue_head(&intf->waitq);
2693 	spin_lock_init(&intf->counter_lock);
2694 	intf->proc_dir = NULL;
2696 	mutex_lock(&smi_watchers_mutex);
2697 	mutex_lock(&ipmi_interfaces_mutex);
2698 	/* Look for a hole in the numbers. */
2700 	link = &ipmi_interfaces;
2701 	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2702 		if (tintf->intf_num != i) {
2703 			link = &tintf->link;
2708 	/* Add the new interface in numeric order. */
2710 		list_add_rcu(&intf->link, &ipmi_interfaces);
2712 		list_add_tail_rcu(&intf->link, link);
2714 	rv = handlers->start_processing(send_info, intf);
/* IPMI >= 1.5 supports Get Channel Info; older versions get a single
   assumed IPMB channel at 0. */
2720 	if ((intf->ipmi_version_major > 1)
2721 	    || ((intf->ipmi_version_major == 1)
2722 		&& (intf->ipmi_version_minor >= 5)))
2724 		/* Start scanning the channels to see what is
2726 		intf->null_user_handler = channel_handler;
2727 		intf->curr_channel = 0;
2728 		rv = send_channel_info_cmd(intf, 0);
2732 		/* Wait for the channel info to be read. */
2733 		wait_event(intf->waitq,
2734 			   intf->curr_channel >= IPMI_MAX_CHANNELS);
2735 		intf->null_user_handler = NULL;
2737 		/* Assume a single IPMB channel at zero. */
2738 		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2739 		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2743 		rv = add_proc_entries(intf, i);
2745 		rv = ipmi_bmc_register(intf, i, sysfs_name);
/* Failure path: unwind everything registered so far. */
2750 		remove_proc_entries(intf);
2751 		intf->handlers = NULL;
2752 		list_del_rcu(&intf->link);
2753 		mutex_unlock(&ipmi_interfaces_mutex);
2754 		mutex_unlock(&smi_watchers_mutex);
2756 		kref_put(&intf->refcount, intf_free);
2759 		 * Keep memory order straight for RCU readers.  Make
2760 		 * sure everything else is committed to memory before
2761 		 * setting intf_num to mark the interface valid.
2765 		mutex_unlock(&ipmi_interfaces_mutex);
2766 		/* After this point the interface is legal to use. */
2767 		call_smi_watchers(i, intf->si_dev);
2768 		mutex_unlock(&smi_watchers_mutex);
/*
 * Flush every outstanding message tracked in the interface's sequence
 * table, delivering an "unspecified error" response to each owner.
 * Called only after the interface has been torn down, so the seq table
 * can be walked without taking seq_lock.
 * NOTE(review): source text appears truncated here; braces/guards for
 * the loop body are not visible in this view.
 */
2774 static void cleanup_smi_msgs(ipmi_smi_t intf)
2777 struct seq_table *ent;
2779 /* No need for locks, the interface is down. */
2780 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2781 ent = &(intf->seq_table[i]);
2784 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
/*
 * Unregister an SMI interface: unhook it from the global interface
 * list, flush its pending messages, remove its /proc entries, notify
 * all registered watchers that the interface is gone, and drop the
 * registration reference.
 *
 * intf->intf_num is set to -1 (and handlers to NULL) under
 * ipmi_interfaces_mutex to mark the interface invalid before the
 * teardown work starts; the saved intf_num is what watchers receive.
 */
2788 int ipmi_unregister_smi(ipmi_smi_t intf)
2790 struct ipmi_smi_watcher *w;
2791 int intf_num = intf->intf_num;
2793 ipmi_bmc_unregister(intf);
2795 mutex_lock(&smi_watchers_mutex);
2796 mutex_lock(&ipmi_interfaces_mutex);
2797 intf->intf_num = -1;
2798 intf->handlers = NULL;
2799 list_del_rcu(&intf->link);
2800 mutex_unlock(&ipmi_interfaces_mutex);
2803 cleanup_smi_msgs(intf);
2805 remove_proc_entries(intf);
2807 /* Call all the watcher interfaces to tell them that
2808 an interface is gone. */
2809 list_for_each_entry(w, &smi_watchers, link)
2810 w->smi_gone(intf_num);
2811 mutex_unlock(&smi_watchers_mutex);
2813 kref_put(&intf->refcount, intf_free);
/*
 * Handle a Get Message response carrying an IPMB response from a
 * remote entity.  Rebuilds the IPMB source address from the raw
 * response bytes, matches the message against the interface's
 * sequence table via intf_find_seq(), copies the payload into the
 * matched recv_msg, and delivers it to the owning user.
 * Statistics counters (invalid/unhandled/handled_ipmb_responses) are
 * updated under counter_lock for each outcome.
 */
2817 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2818 struct ipmi_smi_msg *msg)
2820 struct ipmi_ipmb_addr ipmb_addr;
2821 struct ipmi_recv_msg *recv_msg;
2822 unsigned long flags;
2825 /* This is 11, not 10, because the response must contain a
2826 * completion code. */
2827 if (msg->rsp_size < 11) {
2828 /* Message not big enough, just ignore it. */
2829 spin_lock_irqsave(&intf->counter_lock, flags);
2830 intf->invalid_ipmb_responses++;
2831 spin_unlock_irqrestore(&intf->counter_lock, flags);
2835 if (msg->rsp[2] != 0) {
2836 /* An error getting the response, just ignore it. */
2840 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2841 ipmb_addr.slave_addr = msg->rsp[6];
2842 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2843 ipmb_addr.lun = msg->rsp[7] & 3;
2845 /* It's a response from a remote entity.  Look up the sequence
2846 number and handle the response. */
2847 if (intf_find_seq(intf,
/* Mask off the low netfn bit so request and response netfns compare
   equal when matching the sequence entry. */
2851 (msg->rsp[4] >> 2) & (~1),
2852 (struct ipmi_addr *) &(ipmb_addr),
2855 /* We were unable to find the sequence number,
2856 so just nuke the message. */
2857 spin_lock_irqsave(&intf->counter_lock, flags);
2858 intf->unhandled_ipmb_responses++;
2859 spin_unlock_irqrestore(&intf->counter_lock, flags);
2863 memcpy(recv_msg->msg_data,
2866 /* The other fields matched, so no need to set them, except
2867 for netfn, which needs to be the response that was
2868 returned, not the request value. */
2869 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2870 recv_msg->msg.data = recv_msg->msg_data;
2871 recv_msg->msg.data_len = msg->rsp_size - 10;
2872 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2873 spin_lock_irqsave(&intf->counter_lock, flags);
2874 intf->handled_ipmb_responses++;
2875 spin_unlock_irqrestore(&intf->counter_lock, flags);
2876 deliver_response(recv_msg);
/*
 * Handle an incoming IPMB command addressed to us (arriving via a Get
 * Message response).  Looks for a registered command receiver matching
 * the (netfn, cmd, channel) tuple:
 *  - no receiver: the original msg buffer is reused to build a Send
 *    Message carrying an "invalid command" completion code back to the
 *    requester, and the non-free/non-requeue return value is used
 *    since the buffer was handed to the sender;
 *  - receiver found: an ipmi_recv_msg is allocated, the IPMB source
 *    address and command fields are extracted from the raw bytes, and
 *    the message is delivered to the user (user kref taken earlier is
 *    dropped on allocation failure so the message can be requeued).
 */
2881 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2882 struct ipmi_smi_msg *msg)
2884 struct cmd_rcvr *rcvr;
2886 unsigned char netfn;
2889 ipmi_user_t user = NULL;
2890 struct ipmi_ipmb_addr *ipmb_addr;
2891 struct ipmi_recv_msg *recv_msg;
2892 unsigned long flags;
2893 struct ipmi_smi_handlers *handlers;
2895 if (msg->rsp_size < 10) {
2896 /* Message not big enough, just ignore it. */
2897 spin_lock_irqsave(&intf->counter_lock, flags);
2898 intf->invalid_commands++;
2899 spin_unlock_irqrestore(&intf->counter_lock, flags);
2903 if (msg->rsp[2] != 0) {
2904 /* An error getting the response, just ignore it. */
2908 netfn = msg->rsp[4] >> 2;
2910 chan = msg->rsp[3] & 0xf;
2913 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference on the user while we deliver outside the lookup. */
2916 kref_get(&user->refcount);
2922 /* We didn't find a user, deliver an error response. */
2923 spin_lock_irqsave(&intf->counter_lock, flags);
2924 intf->unhandled_commands++;
2925 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build a Send Message command returning INVALID_CMD to the
   originator, reusing the incoming msg buffer in place. */
2927 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2928 msg->data[1] = IPMI_SEND_MSG_CMD;
2929 msg->data[2] = msg->rsp[3];
2930 msg->data[3] = msg->rsp[6];
2931 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2932 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2933 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2935 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2936 msg->data[8] = msg->rsp[8]; /* cmd */
2937 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2938 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2939 msg->data_size = 11;
2944 printk("Invalid command:");
2945 for (m = 0; m < msg->data_size; m++)
2946 printk(" %2.2x", msg->data[m]);
2951 handlers = intf->handlers;
2953 handlers->sender(intf->send_info, msg, 0);
2954 /* We used the message, so return the value
2955 that causes it to not be freed or
2961 /* Deliver the message to the user. */
2962 spin_lock_irqsave(&intf->counter_lock, flags);
2963 intf->handled_commands++;
2964 spin_unlock_irqrestore(&intf->counter_lock, flags);
2966 recv_msg = ipmi_alloc_recv_msg();
2968 /* We couldn't allocate memory for the
2969 message, so requeue it for handling
2972 kref_put(&user->refcount, free_user);
2974 /* Extract the source address from the data. */
2975 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2976 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2977 ipmb_addr->slave_addr = msg->rsp[6];
2978 ipmb_addr->lun = msg->rsp[7] & 3;
2979 ipmb_addr->channel = msg->rsp[3] & 0xf;
2981 /* Extract the rest of the message information
2982 from the IPMB header.*/
2983 recv_msg->user = user;
2984 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2985 recv_msg->msgid = msg->rsp[7] >> 2;
2986 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2987 recv_msg->msg.cmd = msg->rsp[8];
2988 recv_msg->msg.data = recv_msg->msg_data;
2990 /* We chop off 10, not 9 bytes because the checksum
2991 at the end also needs to be removed. */
2992 recv_msg->msg.data_len = msg->rsp_size - 10;
2993 memcpy(recv_msg->msg_data,
2995 msg->rsp_size - 10);
2996 deliver_response(recv_msg);
/*
 * Handle a Get Message response carrying a LAN-sourced response.
 * LAN counterpart of handle_ipmb_get_msg_rsp(): rebuilds the LAN
 * address (session handle, SWIDs, channel, privilege, LUN) from the
 * raw bytes, matches the sequence number, copies the payload into the
 * matched recv_msg and delivers it.  Counters are updated under
 * counter_lock for each outcome.
 */
3003 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3004 struct ipmi_smi_msg *msg)
3006 struct ipmi_lan_addr lan_addr;
3007 struct ipmi_recv_msg *recv_msg;
3008 unsigned long flags;
3011 /* This is 13, not 12, because the response must contain a
3012 * completion code. */
3013 if (msg->rsp_size < 13) {
3014 /* Message not big enough, just ignore it. */
3015 spin_lock_irqsave(&intf->counter_lock, flags);
3016 intf->invalid_lan_responses++;
3017 spin_unlock_irqrestore(&intf->counter_lock, flags);
3021 if (msg->rsp[2] != 0) {
3022 /* An error getting the response, just ignore it. */
3026 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3027 lan_addr.session_handle = msg->rsp[4];
3028 lan_addr.remote_SWID = msg->rsp[8];
3029 lan_addr.local_SWID = msg->rsp[5];
3030 lan_addr.channel = msg->rsp[3] & 0x0f;
3031 lan_addr.privilege = msg->rsp[3] >> 4;
3032 lan_addr.lun = msg->rsp[9] & 3;
3034 /* It's a response from a remote entity.  Look up the sequence
3035 number and handle the response. */
3036 if (intf_find_seq(intf,
/* Mask off the low netfn bit so request and response netfns compare
   equal when matching the sequence entry. */
3040 (msg->rsp[6] >> 2) & (~1),
3041 (struct ipmi_addr *) &(lan_addr),
3044 /* We were unable to find the sequence number,
3045 so just nuke the message. */
3046 spin_lock_irqsave(&intf->counter_lock, flags);
3047 intf->unhandled_lan_responses++;
3048 spin_unlock_irqrestore(&intf->counter_lock, flags);
3052 memcpy(recv_msg->msg_data,
3054 msg->rsp_size - 11);
3055 /* The other fields matched, so no need to set them, except
3056 for netfn, which needs to be the response that was
3057 returned, not the request value. */
3058 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3059 recv_msg->msg.data = recv_msg->msg_data;
3060 recv_msg->msg.data_len = msg->rsp_size - 12;
3061 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3062 spin_lock_irqsave(&intf->counter_lock, flags);
3063 intf->handled_lan_responses++;
3064 spin_unlock_irqrestore(&intf->counter_lock, flags);
3065 deliver_response(recv_msg);
/*
 * Handle an incoming command that arrived over a LAN channel.  LAN
 * counterpart of handle_ipmb_get_msg_cmd(), with one difference: when
 * no command receiver is registered, the message is simply dropped
 * (rv = 0 lets it be freed) rather than answered with an error
 * response.  When a receiver exists, an ipmi_recv_msg is allocated,
 * the LAN source address and command fields are extracted, and the
 * message is delivered to the user.
 */
3070 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3071 struct ipmi_smi_msg *msg)
3073 struct cmd_rcvr *rcvr;
3075 unsigned char netfn;
3078 ipmi_user_t user = NULL;
3079 struct ipmi_lan_addr *lan_addr;
3080 struct ipmi_recv_msg *recv_msg;
3081 unsigned long flags;
3083 if (msg->rsp_size < 12) {
3084 /* Message not big enough, just ignore it. */
3085 spin_lock_irqsave(&intf->counter_lock, flags);
3086 intf->invalid_commands++;
3087 spin_unlock_irqrestore(&intf->counter_lock, flags);
3091 if (msg->rsp[2] != 0) {
3092 /* An error getting the response, just ignore it. */
3096 netfn = msg->rsp[6] >> 2;
3098 chan = msg->rsp[3] & 0xf;
3101 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference on the user while we deliver outside the lookup. */
3104 kref_get(&user->refcount);
3110 /* We didn't find a user, just give up. */
3111 spin_lock_irqsave(&intf->counter_lock, flags);
3112 intf->unhandled_commands++;
3113 spin_unlock_irqrestore(&intf->counter_lock, flags);
3115 rv = 0; /* Don't do anything with these messages, just
3116 allow them to be freed. */
3118 /* Deliver the message to the user. */
3119 spin_lock_irqsave(&intf->counter_lock, flags);
3120 intf->handled_commands++;
3121 spin_unlock_irqrestore(&intf->counter_lock, flags);
3123 recv_msg = ipmi_alloc_recv_msg();
3125 /* We couldn't allocate memory for the
3126 message, so requeue it for handling
3129 kref_put(&user->refcount, free_user);
3131 /* Extract the source address from the data. */
3132 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3133 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3134 lan_addr->session_handle = msg->rsp[4];
3135 lan_addr->remote_SWID = msg->rsp[8];
3136 lan_addr->local_SWID = msg->rsp[5];
3137 lan_addr->lun = msg->rsp[9] & 3;
3138 lan_addr->channel = msg->rsp[3] & 0xf;
3139 lan_addr->privilege = msg->rsp[3] >> 4;
3141 /* Extract the rest of the message information
3142 from the IPMB header.*/
3143 recv_msg->user = user;
3144 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3145 recv_msg->msgid = msg->rsp[9] >> 2;
3146 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3147 recv_msg->msg.cmd = msg->rsp[10];
3148 recv_msg->msg.data = recv_msg->msg_data;
3150 /* We chop off 12, not 11 bytes because the checksum
3151 at the end also needs to be removed. */
3152 recv_msg->msg.data_len = msg->rsp_size - 12;
3153 memcpy(recv_msg->msg_data,
3155 msg->rsp_size - 12);
3156 deliver_response(recv_msg);
/*
 * Fill a recv_msg with an asynchronous event read from the BMC.
 * The address is set to the system interface (BMC channel), the
 * receive type to ASYNC_EVENT, and the event payload (everything
 * after the completion code, rsp[3..]) is copied into msg_data.
 */
3163 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3164 struct ipmi_smi_msg *msg)
3166 struct ipmi_system_interface_addr *smi_addr;
3168 recv_msg->msgid = 0;
3169 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3170 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3171 smi_addr->channel = IPMI_BMC_CHANNEL;
3172 smi_addr->lun = msg->rsp[0] & 3;
3173 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3174 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3175 recv_msg->msg.cmd = msg->rsp[1];
3176 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3177 recv_msg->msg.data = recv_msg->msg_data;
3178 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle a Read Event Message Buffer response (an async event from
 * the BMC).  Under events_lock:
 *  - one recv_msg copy is allocated per user with gets_events set and
 *    delivered to each (all-or-nothing: on any allocation failure the
 *    partially built list is freed and the event is requeued);
 *  - if no user wants events, the event is parked on waiting_events
 *    (bounded by MAX_EVENTS_IN_QUEUE) for a future consumer, or
 *    discarded with a warning when the queue is full.
 */
3181 static int handle_read_event_rsp(ipmi_smi_t intf,
3182 struct ipmi_smi_msg *msg)
3184 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3185 struct list_head msgs;
3188 int deliver_count = 0;
3189 unsigned long flags;
3191 if (msg->rsp_size < 19) {
3192 /* Message is too small to be an IPMB event. */
3193 spin_lock_irqsave(&intf->counter_lock, flags);
3194 intf->invalid_events++;
3195 spin_unlock_irqrestore(&intf->counter_lock, flags);
3199 if (msg->rsp[2] != 0) {
3200 /* An error getting the event, just ignore it. */
3204 INIT_LIST_HEAD(&msgs);
3206 spin_lock_irqsave(&intf->events_lock, flags);
3208 spin_lock(&intf->counter_lock);
3210 spin_unlock(&intf->counter_lock);
3212 /* Allocate and fill in one message for every user that is getting
3215 list_for_each_entry_rcu(user, &intf->users, link) {
3216 if (!user->gets_events)
3219 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed: roll back every copy built so far and
   requeue the original event. */
3222 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3224 list_del(&recv_msg->link);
3225 ipmi_free_recv_msg(recv_msg);
3227 /* We couldn't allocate memory for the
3228 message, so requeue it for handling
3236 copy_event_into_recv_msg(recv_msg, msg);
3237 recv_msg->user = user;
3238 kref_get(&user->refcount);
3239 list_add_tail(&(recv_msg->link), &msgs);
3243 if (deliver_count) {
3244 /* Now deliver all the messages. */
3245 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3246 list_del(&recv_msg->link);
3247 deliver_response(recv_msg);
3249 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3250 /* No one to receive the message, put it in queue if there's
3251 not already too many things in the queue. */
3252 recv_msg = ipmi_alloc_recv_msg();
3254 /* We couldn't allocate memory for the
3255 message, so requeue it for handling
3261 copy_event_into_recv_msg(recv_msg, msg);
3262 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3263 intf->waiting_events_count++;
3265 /* There's too many things in the queue, discard this
3267 printk(KERN_WARNING PFX "Event queue full, discarding an"
3268 " incoming event\n");
3272 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC.  The matching request's
 * recv_msg travels in msg->user_data; if it is missing the message is
 * malformed (or hardware broke) and a warning is logged.  If the
 * owning user has gone away the recv_msg is freed and the unhandled
 * counter bumped; otherwise the response bytes are copied in with a
 * system-interface address and delivered to the user.
 * NOTE(review): the printk text contains "vender" (sic) — a runtime
 * string, deliberately left unmodified here.
 */
3277 static int handle_bmc_rsp(ipmi_smi_t intf,
3278 struct ipmi_smi_msg *msg)
3280 struct ipmi_recv_msg *recv_msg;
3281 unsigned long flags;
3282 struct ipmi_user *user;
3284 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3285 if (recv_msg == NULL)
3287 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3288 "could be because of a malformed message, or\n"
3289 "because of a hardware error.  Contact your\n"
3290 "hardware vender for assistance\n");
3294 user = recv_msg->user;
3295 /* Make sure the user still exists. */
3296 if (user && !user->valid) {
3297 /* The user for the message went away, so give up. */
3298 spin_lock_irqsave(&intf->counter_lock, flags);
3299 intf->unhandled_local_responses++;
3300 spin_unlock_irqrestore(&intf->counter_lock, flags);
3301 ipmi_free_recv_msg(recv_msg);
3303 struct ipmi_system_interface_addr *smi_addr;
3305 spin_lock_irqsave(&intf->counter_lock, flags);
3306 intf->handled_local_responses++;
3307 spin_unlock_irqrestore(&intf->counter_lock, flags);
3308 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3309 recv_msg->msgid = msg->msgid;
3310 smi_addr = ((struct ipmi_system_interface_addr *)
3312 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3313 smi_addr->channel = IPMI_BMC_CHANNEL;
3314 smi_addr->lun = msg->rsp[0] & 3;
3315 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3316 recv_msg->msg.cmd = msg->rsp[1];
3317 memcpy(recv_msg->msg_data,
3320 recv_msg->msg.data = recv_msg->msg_data;
3321 recv_msg->msg.data_len = msg->rsp_size - 2;
3322 deliver_response(recv_msg);
/* Handle a new message.  Return 1 if the message should be requeued,
   0 if the message should be freed, or -1 if the message should not
   be freed or requeued. */
/*
 * Central dispatch for a message coming up from the SMI layer.
 * After sanity-checking the response (size, and that netfn/cmd echo
 * the request — synthesizing an IPMI_ERR_UNSPECIFIED response when
 * they don't), it routes by command:
 *  - Send Message response with user_data: response-to-a-response,
 *    delivered directly to the requesting user;
 *  - Get Message response: dispatched by channel medium to the
 *    IPMB or LAN response/command handlers;
 *  - Read Event Message Buffer response: async event path;
 *  - anything else: a response from the local BMC.
 */
3331 static int handle_new_recv_msg(ipmi_smi_t intf,
3332 struct ipmi_smi_msg *msg)
3340 for (m = 0; m < msg->rsp_size; m++)
3341 printk(" %2.2x", msg->rsp[m]);
3344 if (msg->rsp_size < 2) {
3345 /* Message is too small to be correct. */
3346 printk(KERN_WARNING PFX "BMC returned to small a message"
3347 " for netfn %x cmd %x, got %d bytes\n",
3348 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3350 /* Generate an error response for the message. */
3351 msg->rsp[0] = msg->data[0] | (1 << 2);
3352 msg->rsp[1] = msg->data[1];
3353 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3355 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3356 || (msg->rsp[1] != msg->data[1])) /* Command */
3358 /* The response is not even marginally correct. */
3359 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3360 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3361 (msg->data[0] >> 2) | 1, msg->data[1],
3362 msg->rsp[0] >> 2, msg->rsp[1]);
3364 /* Generate an error response for the message. */
3365 msg->rsp[0] = msg->data[0] | (1 << 2);
3366 msg->rsp[1] = msg->data[1];
3367 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3371 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3372 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3373 && (msg->user_data != NULL))
3375 /* It's a response to a response we sent.  For this we
3376 deliver a send message response to the user. */
3377 struct ipmi_recv_msg *recv_msg = msg->user_data;
3380 if (msg->rsp_size < 2)
3381 /* Message is too small to be correct. */
3384 chan = msg->data[2] & 0x0f;
3385 if (chan >= IPMI_MAX_CHANNELS)
3386 /* Invalid channel number */
3392 /* Make sure the user still exists. */
3393 if (!recv_msg->user || !recv_msg->user->valid)
3396 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3397 recv_msg->msg.data = recv_msg->msg_data;
/* Only the completion code is returned for a response send. */
3398 recv_msg->msg.data_len = 1;
3399 recv_msg->msg_data[0] = msg->rsp[2];
3400 deliver_response(recv_msg);
3401 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3402 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3404 /* It's from the receive queue. */
3405 chan = msg->rsp[3] & 0xf;
3406 if (chan >= IPMI_MAX_CHANNELS) {
3407 /* Invalid channel number */
3412 switch (intf->channels[chan].medium) {
3413 case IPMI_CHANNEL_MEDIUM_IPMB:
3414 if (msg->rsp[4] & 0x04) {
3415 /* It's a response, so find the
3416 requesting message and send it up. */
3417 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3419 /* It's a command to the SMS from some other
3420 entity.  Handle that. */
3421 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3425 case IPMI_CHANNEL_MEDIUM_8023LAN:
3426 case IPMI_CHANNEL_MEDIUM_ASYNC:
3427 if (msg->rsp[6] & 0x04) {
3428 /* It's a response, so find the
3429 requesting message and send it up. */
3430 requeue = handle_lan_get_msg_rsp(intf, msg);
3432 /* It's a command to the SMS from some other
3433 entity.  Handle that. */
3434 requeue = handle_lan_get_msg_cmd(intf, msg);
3439 /* We don't handle the channel type, so just
3440 * free the message. */
3444 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3445 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3447 /* It's an asynchronous event. */
3448 requeue = handle_read_event_rsp(intf, msg);
3450 /* It's a response from the local BMC. */
3451 requeue = handle_bmc_rsp(intf, msg);
/* Handle a new message from the lower layer. */
/*
 * Entry point called by SMI drivers when a message arrives.  A local
 * Send Message response (user_data == NULL) is intercepted here: on a
 * fatal completion code the per-channel error counter is bumped and
 * the sequence entry errored out; otherwise the sequence timer is
 * started.  All other messages go through handle_new_recv_msg(),
 * with waiting_msgs used to preserve ordering and to park messages
 * that cannot be handled right now.  waiting_msgs_lock is skipped in
 * run_to_completion (panic) mode.
 */
3459 void ipmi_smi_msg_received(ipmi_smi_t intf,
3460 struct ipmi_smi_msg *msg)
3462 unsigned long flags = 0; /* keep us warning-free. */
3464 int run_to_completion;
3467 if ((msg->data_size >= 2)
3468 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3469 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3470 && (msg->user_data == NULL))
3472 /* This is the local response to a command send, start
3473 the timer for these.  The user_data will not be
3474 NULL if this is a response send, and we will let
3475 response sends just go through. */
3477 /* Check for errors, if we get certain errors (ones
3478 that mean basically we can try again later), we
3479 ignore them and start the timer.  Otherwise we
3480 report the error immediately. */
3481 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3482 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3483 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3484 && (msg->rsp[2] != IPMI_BUS_ERR)
3485 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3487 int chan = msg->rsp[3] & 0xf;
3489 /* Got an error sending the message, handle it. */
3490 spin_lock_irqsave(&intf->counter_lock, flags);
3491 if (chan >= IPMI_MAX_CHANNELS)
3492 ; /* This shouldn't happen */
3493 else if ((intf->channels[chan].medium
3494 == IPMI_CHANNEL_MEDIUM_8023LAN)
3495 || (intf->channels[chan].medium
3496 == IPMI_CHANNEL_MEDIUM_ASYNC))
3497 intf->sent_lan_command_errs++;
3499 intf->sent_ipmb_command_errs++;
3500 spin_unlock_irqrestore(&intf->counter_lock, flags);
3501 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3503 /* The message was sent, start the timer. */
3504 intf_start_seq_timer(intf, msg->msgid);
3507 ipmi_free_smi_msg(msg);
3511 /* To preserve message order, if the list is not empty, we
3512 tack this message onto the end of the list. */
3513 run_to_completion = intf->run_to_completion;
3514 if (!run_to_completion)
3515 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3516 if (!list_empty(&intf->waiting_msgs)) {
3517 list_add_tail(&msg->link, &intf->waiting_msgs);
3518 if (!run_to_completion)
3519 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3522 if (!run_to_completion)
3523 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3525 rv = handle_new_recv_msg(intf, msg);
3527 /* Could not handle the message now, just add it to a
3528 list to handle later. */
3529 run_to_completion = intf->run_to_completion;
3530 if (!run_to_completion)
3531 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3532 list_add_tail(&msg->link, &intf->waiting_msgs);
3533 if (!run_to_completion)
3534 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3535 } else if (rv == 0) {
3536 ipmi_free_smi_msg(msg);
/*
 * Notify every user that registered a watchdog-pretimeout callback.
 * Called by SMI drivers when the watchdog pretimeout fires.
 * NOTE(review): RCU read-side locking around the user-list walk is
 * not visible in this truncated view — presumably present; verify.
 */
3543 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3548 list_for_each_entry_rcu(user, &intf->users, link) {
3549 if (!user->handler->ipmi_watchdog_pretimeout)
3552 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * Build a fresh SMI message from a recv_msg for retransmission,
 * encoding the sequence number and id into msgid.  Returns NULL on
 * allocation failure — acceptable, since the retry logic will try
 * again on a later timeout tick.
 */
3558 static struct ipmi_smi_msg *
3559 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3560 unsigned char seq, long seqid)
3562 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3564 /* If we can't allocate the message, then just return, we
3565 get 4 retries, so this should be ok. */
3568 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3569 smi_msg->data_size = recv_msg->msg.data_len;
3570 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3576 for (m = 0; m < smi_msg->data_size; m++)
3577 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period.  If it expires with
 * no retries left, the recv_msg is moved onto the caller's timeouts
 * list (for error delivery after seq_lock is dropped) and the
 * appropriate timed-out counter bumped.  If retries remain, the
 * message is rebuilt and resent at priority 0; seq_lock (held by the
 * caller via *flags) is dropped around the send and reacquired after.
 * Does nothing while the interface is being unregistered
 * (intf_num == -1).
 */
3584 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3585 struct list_head *timeouts, long timeout_period,
3586 int slot, unsigned long *flags)
3588 struct ipmi_recv_msg *msg;
3589 struct ipmi_smi_handlers *handlers;
3591 if (intf->intf_num == -1)
3597 ent->timeout -= timeout_period;
3598 if (ent->timeout > 0)
3601 if (ent->retries_left == 0) {
3602 /* The message has used all its retries. */
3604 msg = ent->recv_msg;
3605 list_add_tail(&msg->link, timeouts);
3606 spin_lock(&intf->counter_lock);
3608 intf->timed_out_ipmb_broadcasts++;
3609 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3610 intf->timed_out_lan_commands++;
3612 intf->timed_out_ipmb_commands++;
3613 spin_unlock(&intf->counter_lock);
3615 struct ipmi_smi_msg *smi_msg;
3616 /* More retries, send again. */
3618 /* Start with the max timer, set to normal
3619 timer after the message is sent. */
3620 ent->timeout = MAX_MSG_TIMEOUT;
3621 ent->retries_left--;
3622 spin_lock(&intf->counter_lock);
3623 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3624 intf->retransmitted_lan_commands++;
3626 intf->retransmitted_ipmb_commands++;
3627 spin_unlock(&intf->counter_lock);
3629 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Must drop seq_lock across the driver's sender callback. */
3634 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3636 /* Send the new message.  We send with a zero
3637 * priority.  It timed out, I doubt time is
3638 * that critical now, and high priority
3639 * messages are really only for messages to the
3640 * local MC, which don't get resent. */
3641 handlers = intf->handlers;
3643 intf->handlers->sender(intf->send_info,
3646 ipmi_free_smi_msg(smi_msg);
3648 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic (per-tick) work for all interfaces: drain as many parked
 * waiting_msgs as possible (stopping at the first unhandleable one to
 * keep ordering), age every sequence-table entry via
 * check_msg_timeout() and deliver timeout errors for the expired
 * ones, and count down the auto-maintenance-mode timer, dropping out
 * of maintenance mode when it hits zero.
 */
3652 static void ipmi_timeout_handler(long timeout_period)
3655 struct list_head timeouts;
3656 struct ipmi_recv_msg *msg, *msg2;
3657 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3658 unsigned long flags;
3662 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3663 /* See if any waiting messages need to be processed. */
3664 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3665 list_for_each_entry_safe(smi_msg, smi_msg2,
3666 &intf->waiting_msgs, link) {
3667 if (!handle_new_recv_msg(intf, smi_msg)) {
3668 list_del(&smi_msg->link);
3669 ipmi_free_smi_msg(smi_msg);
3671 /* To preserve message order, quit if we
3672 can't handle a message. */
3676 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3678 /* Go through the seq table and find any messages that
3679 have timed out, putting them in the timeouts
3681 INIT_LIST_HEAD(&timeouts);
3682 spin_lock_irqsave(&intf->seq_lock, flags);
3683 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3684 check_msg_timeout(intf, &(intf->seq_table[i]),
3685 &timeouts, timeout_period, i,
3687 spin_unlock_irqrestore(&intf->seq_lock, flags);
3689 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3690 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3693 * Maintenance mode handling.  Check the timeout
3694 * optimistically before we claim the lock.  It may
3695 * mean a timeout gets missed occasionally, but that
3696 * only means the timeout gets extended by one period
3697 * in that case.  No big deal, and it avoids the lock
3700 if (intf->auto_maintenance_timeout > 0) {
3701 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
/* Re-check under the lock; the optimistic test above can race. */
3702 if (intf->auto_maintenance_timeout > 0) {
3703 intf->auto_maintenance_timeout
3705 if (!intf->maintenance_mode
3706 && (intf->auto_maintenance_timeout <= 0))
3708 intf->maintenance_mode_enable = 0;
3709 maintenance_mode_update(intf);
3712 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
/*
 * Ask each interface's driver to poll for pending events.  Skipped
 * for interfaces in maintenance mode.  Runs from the periodic timer,
 * so (per the original comment) handlers need no NULL check here.
 */
3719 static void ipmi_request_event(void)
3722 struct ipmi_smi_handlers *handlers;
3725 /* Called from the timer, no need to check if handlers is
3727 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3728 /* No event requests when in maintenance mode. */
3729 if (intf->maintenance_mode_enable)
3732 handlers = intf->handlers;
3734 handlers->request_events(intf->send_info);
/* Periodic timer machinery: ipmi_timeout() reschedules itself every
   IPMI_TIMEOUT_TIME ms, runs the timeout handler each tick, and
   requests events roughly once a second.  stop_operation lets module
   teardown stop the rescheduling. */
3739 static struct timer_list ipmi_timer;
3741 /* Call every ~100 ms. */
3742 #define IPMI_TIMEOUT_TIME 100
3744 /* How many jiffies does it take to get to the timeout time. */
3745 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3747 /* Request events from the queue every second (this is the number of
3748 IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
3749 future, IPMI will add a way to know immediately if an event is in
3750 the queue and this silliness can go away. */
3751 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3753 static atomic_t stop_operation;
3754 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3756 static void ipmi_timeout(unsigned long data)
3758 if (atomic_read(&stop_operation))
3762 if (ticks_to_req_ev == 0) {
3763 ipmi_request_event();
3764 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3767 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3769 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Outstanding-allocation counters for SMI and receive messages:
   incremented on alloc, decremented in the done/free handlers, so a
   nonzero value at module exit indicates a message leak. */
3773 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3774 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3776 /* FIXME - convert these to slabs. */
/* Destructor installed as ->done on allocated SMI messages. */
3777 static void free_smi_msg(struct ipmi_smi_msg *msg)
3779 atomic_dec(&smi_msg_inuse_count);
/* Allocate an SMI message (GFP_ATOMIC: callers may hold spinlocks).
   Returns NULL on allocation failure. */
3783 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3785 struct ipmi_smi_msg *rv;
3786 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3788 rv->done = free_smi_msg;
3789 rv->user_data = NULL;
3790 atomic_inc(&smi_msg_inuse_count);
/* Destructor installed as ->done on allocated receive messages. */
3795 static void free_recv_msg(struct ipmi_recv_msg *msg)
3797 atomic_dec(&recv_msg_inuse_count);
/* Allocate a receive message; NULL on failure. */
3801 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3803 struct ipmi_recv_msg *rv;
3805 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3808 rv->done = free_recv_msg;
3809 atomic_inc(&recv_msg_inuse_count);
/* Free a receive message, dropping the owning user's reference
   (if any) along with it. */
3814 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3817 kref_put(&msg->user->refcount, free_user);
3821 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done handlers for the statically-allocated messages used on
   the panic path, where nothing may be freed. */
3823 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3827 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3831 #ifdef CONFIG_IPMI_PANIC_STRING
/* null_user_handler callback: captures the Get Event Receiver
   response (slave address and LUN) into the interface. */
3832 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3834 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3835 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3836 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3837 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3839 /* A get event receiver command, save it. */
3840 intf->event_receiver = msg->msg.data[1];
3841 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/* null_user_handler callback: extracts the SEL-device and
   event-generator capability bits from a Get Device ID response. */
3845 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3847 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3848 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3849 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3850 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3852 /* A get device id command, save if we are an event
3853 receiver or generator. */
3854 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3855 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * On panic, push diagnostic events out through every registered
 * interface.  All messages use stack-allocated buffers with no-op
 * done handlers, and every interface is flipped to run-to-completion
 * mode first, since the normal allocation and scheduling machinery
 * cannot be trusted during a panic.  First an "OS Critical Stop"
 * platform event is sent everywhere; then (CONFIG_IPMI_PANIC_STRING)
 * the panic string is chopped into 11-byte pieces and written as OEM
 * SEL records, either to a discovered event receiver on IPMB or to
 * the local SEL device.
 */
3860 static void send_panic_events(char *str)
3862 struct kernel_ipmi_msg msg;
3864 unsigned char data[16];
3865 struct ipmi_system_interface_addr *si;
3866 struct ipmi_addr addr;
3867 struct ipmi_smi_msg smi_msg;
3868 struct ipmi_recv_msg recv_msg;
3870 si = (struct ipmi_system_interface_addr *) &addr;
3871 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3872 si->channel = IPMI_BMC_CHANNEL;
3875 /* Fill in an event telling that we have failed. */
3876 msg.netfn = 0x04; /* Sensor or Event. */
3877 msg.cmd = 2; /* Platform event command. */
3880 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3881 data[1] = 0x03; /* This is for IPMI 1.0. */
3882 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3884 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3885 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3886 /* Put a few breadcrumbs in.  Hopefully later we can add more things
3887 to make the panic events more useful. */
3894 smi_msg.done = dummy_smi_done_handler;
3895 recv_msg.done = dummy_recv_done_handler;
3897 /* For every registered interface, send the event. */
3898 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3899 if (!intf->handlers)
3900 /* Interface is not ready. */
3903 intf->run_to_completion = 1;
3904 /* Send the event announcing the panic. */
3905 intf->handlers->set_run_to_completion(intf->send_info, 1);
3906 i_ipmi_request(NULL,
3915 intf->channels[0].address,
3916 intf->channels[0].lun,
3917 0, 1); /* Don't retry, and don't wait. */
3920 #ifdef CONFIG_IPMI_PANIC_STRING
3921 /* On every interface, dump a bunch of OEM event holding the
3926 /* For every registered interface, send the event. */
3927 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3929 struct ipmi_ipmb_addr *ipmb;
3932 if (intf->intf_num == -1)
3933 /* Interface was not ready yet. */
3937 * intf_num is used as a marker to tell if the
3938 * interface is valid.  Thus we need a read barrier to
3939 * make sure data fetched before checking intf_num
3944 /* First job here is to figure out where to send the
3945 OEM events.  There's no way in IPMI to send OEM
3946 events using an event send command, so we have to
3947 find the SEL to put them in and stick them in
3950 /* Get capabilities from the get device id. */
3951 intf->local_sel_device = 0;
3952 intf->local_event_generator = 0;
3953 intf->event_receiver = 0;
3955 /* Request the device info from the local MC. */
3956 msg.netfn = IPMI_NETFN_APP_REQUEST;
3957 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3960 intf->null_user_handler = device_id_fetcher;
3961 i_ipmi_request(NULL,
3970 intf->channels[0].address,
3971 intf->channels[0].lun,
3972 0, 1); /* Don't retry, and don't wait. */
3974 if (intf->local_event_generator) {
3975 /* Request the event receiver from the local MC. */
3976 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3977 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3980 intf->null_user_handler = event_receiver_fetcher;
3981 i_ipmi_request(NULL,
3990 intf->channels[0].address,
3991 intf->channels[0].lun,
3992 0, 1); /* no retry, and no wait. */
3994 intf->null_user_handler = NULL;
3996 /* Validate the event receiver.  The low bit must not
3997 be 1 (it must be a valid IPMB address), it cannot
3998 be zero, and it must not be my address. */
3999 if (((intf->event_receiver & 1) == 0)
4000 && (intf->event_receiver != 0)
4001 && (intf->event_receiver != intf->channels[0].address))
4003 /* The event receiver is valid, send an IPMB
4005 ipmb = (struct ipmi_ipmb_addr *) &addr;
4006 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4007 ipmb->channel = 0; /* FIXME - is this right? */
4008 ipmb->lun = intf->event_receiver_lun;
4009 ipmb->slave_addr = intf->event_receiver;
4010 } else if (intf->local_sel_device) {
4011 /* The event receiver was not valid (or was
4012 me), but I am an SEL device, just dump it
4014 si = (struct ipmi_system_interface_addr *) &addr;
4015 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4016 si->channel = IPMI_BMC_CHANNEL;
4019 continue; /* Nowhere to send the event. */
4022 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4023 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4029 int size = strlen(p);
4035 data[2] = 0xf0; /* OEM event without timestamp. */
4036 data[3] = intf->channels[0].address;
4037 data[4] = j++; /* sequence # */
4038 /* Always give 11 bytes, so strncpy will fill
4039 it with zeroes for me. */
4040 strncpy(data+5, p, 11);
4043 i_ipmi_request(NULL,
4052 intf->channels[0].address,
4053 intf->channels[0].lun,
4054 0, 1); /* no retry, and no wait. */
4057 #endif /* CONFIG_IPMI_PANIC_STRING */
4059 #endif /* CONFIG_IPMI_PANIC_EVENT */
4059 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Set once panic handling has run, so the work is not repeated if the
   panic notifier chain fires more than once.  (The lines that test and
   set it are among the original lines missing from this extract --
   NOTE(review): confirm against the full file.) */
4061 static int has_panicked;
/* Panic-notifier callback.  Walks every registered IPMI interface via
   an RCU list traversal (no locks taken -- the system is panicking)
   and flips each ready interface into run-to-completion mode so its
   low-level driver sends messages synchronously without relying on
   interrupts; then, when CONFIG_IPMI_PANIC_EVENT is configured, sends
   the panic events out through send_panic_events().
   NOTE(review): several original source lines (4065-4072, 4077-4078,
   4081-4082, 4085-4089) are absent from this extract, including the
   third parameter, the has_panicked guard, the loop's `continue`, the
   closing braces and the notifier return value -- do not modify this
   function without consulting the complete source. */
4063 static int panic_event(struct notifier_block *this,
4064 unsigned long event,
4073 /* For every registered interface, set it to run to completion. */
4074 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4075 if (!intf->handlers)
4076 /* Interface is not ready. */
4079 intf->run_to_completion = 1;
4080 intf->handlers->set_run_to_completion(intf->send_info, 1);
4083 #ifdef CONFIG_IPMI_PANIC_EVENT
4084 send_panic_events(ptr);
/* Notifier registered on panic_notifier_list by ipmi_init_msghandler()
   and removed again in cleanup_ipmi().  Priority 200 so IPMI panic
   handling runs fairly early in the panic notifier chain.
   NOTE(review): the closing "};" line is missing from this extract. */
4090 static struct notifier_block panic_block = {
4091 .notifier_call = panic_event,
4093 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/* One-time initialization of the IPMI message handler core: registers
   the IPMI device driver, creates the /proc/ipmi directory (when
   CONFIG_PROC_FS is enabled), arms the periodic timeout timer and hooks
   the panic notifier chain.
   NOTE(review): this extract is missing the original lines that declare
   `rv`, test/set the `initialized` flag, handle the error paths after
   driver_register()/proc_mkdir() and return -- consult the full file
   before changing any control flow here. */
4096 static int ipmi_init_msghandler(void)
4103 rv = driver_register(&ipmidriver);
4105 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4109 printk(KERN_INFO "ipmi message handler version "
4110 IPMI_DRIVER_VERSION "\n");
4112 #ifdef CONFIG_PROC_FS
4113 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4114 if (!proc_ipmi_root) {
4115 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
/* ->owner pins this module while proc entries are in use; this field
   existed in older proc_fs and was removed in later kernels --
   NOTE(review): verify against the target kernel version. */
4119 proc_ipmi_root->owner = THIS_MODULE;
4120 #endif /* CONFIG_PROC_FS */
/* Arm the periodic housekeeping timer (request timeouts etc.);
   first expiry is IPMI_TIMEOUT_JIFFIES from now. */
4122 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4123 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Register panic_block so panic_event() runs on a system panic. */
4125 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: simply delegates to ipmi_init_msghandler().
   NOTE(review): the function's braces and return statement are among
   the original lines missing from this extract. */
4132 static __init int ipmi_init_msghandler_mod(void)
4134 ipmi_init_msghandler();
/* Module exit: undoes ipmi_init_msghandler() roughly in reverse order --
   unregisters the panic notifier, stops the timeout timer, removes the
   /proc/ipmi directory, unregisters the driver, and finally warns about
   any SMI/recv message buffers still outstanding (leak check).
   NOTE(review): several original lines are missing from this extract
   (the `count` declaration, parts of the stop_operation/timer interlock,
   the printk count arguments and closing braces) -- consult the full
   file before editing. */
4138 static __exit void cleanup_ipmi(void)
4145 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4147 /* This can't be called if any interfaces exist, so no worry about
4148 shutting down the interfaces. */
4150 /* Tell the timer to stop, then wait for it to stop. This avoids
4151 problems with race conditions removing the timer here. */
4152 atomic_inc(&stop_operation);
4153 del_timer_sync(&ipmi_timer);
4155 #ifdef CONFIG_PROC_FS
4156 remove_proc_entry(proc_ipmi_root->name, NULL);
4157 #endif /* CONFIG_PROC_FS */
4159 driver_unregister(&ipmidriver);
/* Check for buffer leaks: both counters should be zero at unload. */
4163 /* Check for buffer leaks. */
4164 count = atomic_read(&smi_msg_inuse_count);
4166 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4168 count = atomic_read(&recv_msg_inuse_count);
4170 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module registration plus the public symbols this core exports to the
   other IPMI modules (device interface, system-interface driver,
   watchdog, poweroff).  These names are the kernel-internal API of the
   message handler; removing or renaming any of them breaks those
   dependent modules. */
4173 module_exit(cleanup_ipmi);
4175 module_init(ipmi_init_msghandler_mod);
4176 MODULE_LICENSE("GPL");
4177 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4178 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4179 MODULE_VERSION(IPMI_DRIVER_VERSION);
4181 EXPORT_SYMBOL(ipmi_create_user);
4182 EXPORT_SYMBOL(ipmi_destroy_user);
4183 EXPORT_SYMBOL(ipmi_get_version);
4184 EXPORT_SYMBOL(ipmi_request_settime);
4185 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4186 EXPORT_SYMBOL(ipmi_poll_interface);
4187 EXPORT_SYMBOL(ipmi_register_smi);
4188 EXPORT_SYMBOL(ipmi_unregister_smi);
4189 EXPORT_SYMBOL(ipmi_register_for_cmd);
4190 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4191 EXPORT_SYMBOL(ipmi_smi_msg_received);
4192 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4193 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4194 EXPORT_SYMBOL(ipmi_addr_length);
4195 EXPORT_SYMBOL(ipmi_validate_addr);
4196 EXPORT_SYMBOL(ipmi_set_gets_events);
4197 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4198 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4199 EXPORT_SYMBOL(ipmi_set_my_address);
4200 EXPORT_SYMBOL(ipmi_get_my_address);
4201 EXPORT_SYMBOL(ipmi_set_my_LUN);
4202 EXPORT_SYMBOL(ipmi_get_my_LUN);
4203 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4204 EXPORT_SYMBOL(ipmi_free_recv_msg);