4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "39.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 static struct proc_dir_entry *proc_ipmi_root = NULL;
60 #endif /* CONFIG_PROC_FS */
62 #define MAX_EVENTS_IN_QUEUE 25
64 /* Don't let a message sit in a queue forever, always time it with at lest
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
70 * The main "user" data structure.
74 struct list_head link;
76 /* Set to "0" when the user is destroyed. */
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
85 /* The interface this user is bound to. */
88 /* Does this interface receive IPMI events? */
94 struct list_head link;
102 * This is used to form a linked lised during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
133 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
135 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
137 seq = ((msgid >> 26) & 0x3f); \
138 seqid = (msgid & 0x3fffff); \
141 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
145 unsigned char medium;
146 unsigned char protocol;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
152 /* My LUN. This should generally stay the SMS LUN, but just in
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry *next;
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
172 struct kref refcount;
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
187 #define IPMI_IPMB_NUM_SEQ 64
188 #define IPMI_MAX_CHANNELS 16
191 /* What interface number are we? */
194 struct kref refcount;
196 /* The list of upper layers that are using me. seq_lock
198 struct list_head users;
200 /* Used for wake ups at startup. */
201 wait_queue_head_t waitq;
203 struct bmc_device *bmc;
206 /* This is the lower-layer's sender routine. */
207 struct ipmi_smi_handlers *handlers;
210 #ifdef CONFIG_PROC_FS
211 /* A list of proc entries for this interface. This does not
212 need a lock, only one thread creates it and only one thread
214 spinlock_t proc_entry_lock;
215 struct ipmi_proc_entry *proc_entries;
218 /* Driver-model device for the system interface. */
219 struct device *si_dev;
221 /* A table of sequence numbers for this interface. We use the
222 sequence numbers for IPMB messages that go out of the
223 interface to match them up with their responses. A routine
224 is called periodically to time the items in this list. */
226 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
229 /* Messages that were delayed for some reason (out of memory,
230 for instance), will go in here to be processed later in a
231 periodic timer interrupt. */
232 spinlock_t waiting_msgs_lock;
233 struct list_head waiting_msgs;
235 /* The list of command receivers that are registered for commands
236 on this interface. */
237 struct mutex cmd_rcvrs_mutex;
238 struct list_head cmd_rcvrs;
240 /* Events that were queues because no one was there to receive
242 spinlock_t events_lock; /* For dealing with event stuff. */
243 struct list_head waiting_events;
244 unsigned int waiting_events_count; /* How many events in queue? */
246 /* The event receiver for my BMC, only really used at panic
247 shutdown as a place to store this. */
248 unsigned char event_receiver;
249 unsigned char event_receiver_lun;
250 unsigned char local_sel_device;
251 unsigned char local_event_generator;
253 /* A cheap hack, if this is non-null and a message to an
254 interface comes in with a NULL user, call this routine with
255 it. Note that the message will still be freed by the
256 caller. This only works on the system interface. */
257 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
259 /* When we are scanning the channels for an SMI, this will
260 tell which channel we are scanning. */
263 /* Channel information */
264 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
267 struct proc_dir_entry *proc_dir;
268 char proc_dir_name[10];
270 spinlock_t counter_lock; /* For making counters atomic. */
272 /* Commands we got that were invalid. */
273 unsigned int sent_invalid_commands;
275 /* Commands we sent to the MC. */
276 unsigned int sent_local_commands;
277 /* Responses from the MC that were delivered to a user. */
278 unsigned int handled_local_responses;
279 /* Responses from the MC that were not delivered to a user. */
280 unsigned int unhandled_local_responses;
282 /* Commands we sent out to the IPMB bus. */
283 unsigned int sent_ipmb_commands;
284 /* Commands sent on the IPMB that had errors on the SEND CMD */
285 unsigned int sent_ipmb_command_errs;
286 /* Each retransmit increments this count. */
287 unsigned int retransmitted_ipmb_commands;
288 /* When a message times out (runs out of retransmits) this is
290 unsigned int timed_out_ipmb_commands;
292 /* This is like above, but for broadcasts. Broadcasts are
293 *not* included in the above count (they are expected to
295 unsigned int timed_out_ipmb_broadcasts;
297 /* Responses I have sent to the IPMB bus. */
298 unsigned int sent_ipmb_responses;
300 /* The response was delivered to the user. */
301 unsigned int handled_ipmb_responses;
302 /* The response had invalid data in it. */
303 unsigned int invalid_ipmb_responses;
304 /* The response didn't have anyone waiting for it. */
305 unsigned int unhandled_ipmb_responses;
307 /* Commands we sent out to the IPMB bus. */
308 unsigned int sent_lan_commands;
309 /* Commands sent on the IPMB that had errors on the SEND CMD */
310 unsigned int sent_lan_command_errs;
311 /* Each retransmit increments this count. */
312 unsigned int retransmitted_lan_commands;
313 /* When a message times out (runs out of retransmits) this is
315 unsigned int timed_out_lan_commands;
317 /* Responses I have sent to the IPMB bus. */
318 unsigned int sent_lan_responses;
320 /* The response was delivered to the user. */
321 unsigned int handled_lan_responses;
322 /* The response had invalid data in it. */
323 unsigned int invalid_lan_responses;
324 /* The response didn't have anyone waiting for it. */
325 unsigned int unhandled_lan_responses;
327 /* The command was delivered to the user. */
328 unsigned int handled_commands;
329 /* The command had invalid data in it. */
330 unsigned int invalid_commands;
331 /* The command didn't have anyone waiting for it. */
332 unsigned int unhandled_commands;
334 /* Invalid data in an event. */
335 unsigned int invalid_events;
336 /* Events that were received with the proper format. */
339 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
341 /* Used to mark an interface entry that cannot be used but is not a
342 * free entry, either, primarily used at creation and deletion time so
343 * a slot doesn't get reused too quickly. */
344 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
345 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
346 || (i == IPMI_INVALID_INTERFACE_ENTRY))
349 * The driver model view of the IPMI messaging driver.
351 static struct device_driver ipmidriver = {
353 .bus = &platform_bus_type
355 static DEFINE_MUTEX(ipmidriver_mutex);
357 #define MAX_IPMI_INTERFACES 4
358 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
360 /* Directly protects the ipmi_interfaces data structure. */
361 static DEFINE_SPINLOCK(interfaces_lock);
363 /* List of watchers that want to know when smi's are added and
365 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
366 static DECLARE_RWSEM(smi_watchers_sem);
369 static void free_recv_msg_list(struct list_head *q)
371 struct ipmi_recv_msg *msg, *msg2;
373 list_for_each_entry_safe(msg, msg2, q, link) {
374 list_del(&msg->link);
375 ipmi_free_recv_msg(msg);
379 static void clean_up_interface_data(ipmi_smi_t intf)
382 struct cmd_rcvr *rcvr, *rcvr2;
383 struct list_head list;
385 free_recv_msg_list(&intf->waiting_msgs);
386 free_recv_msg_list(&intf->waiting_events);
388 /* Wholesale remove all the entries from the list in the
389 * interface and wait for RCU to know that none are in use. */
390 mutex_lock(&intf->cmd_rcvrs_mutex);
391 list_add_rcu(&list, &intf->cmd_rcvrs);
392 list_del_rcu(&intf->cmd_rcvrs);
393 mutex_unlock(&intf->cmd_rcvrs_mutex);
396 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
399 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
400 if ((intf->seq_table[i].inuse)
401 && (intf->seq_table[i].recv_msg))
403 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
408 static void intf_free(struct kref *ref)
410 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
412 clean_up_interface_data(intf);
416 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
421 down_write(&smi_watchers_sem);
422 list_add(&(watcher->link), &smi_watchers);
423 up_write(&smi_watchers_sem);
424 spin_lock_irqsave(&interfaces_lock, flags);
425 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
426 ipmi_smi_t intf = ipmi_interfaces[i];
427 if (IPMI_INVALID_INTERFACE(intf))
429 spin_unlock_irqrestore(&interfaces_lock, flags);
430 watcher->new_smi(i, intf->si_dev);
431 spin_lock_irqsave(&interfaces_lock, flags);
433 spin_unlock_irqrestore(&interfaces_lock, flags);
437 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
439 down_write(&smi_watchers_sem);
440 list_del(&(watcher->link));
441 up_write(&smi_watchers_sem);
446 call_smi_watchers(int i, struct device *dev)
448 struct ipmi_smi_watcher *w;
450 down_read(&smi_watchers_sem);
451 list_for_each_entry(w, &smi_watchers, link) {
452 if (try_module_get(w->owner)) {
454 module_put(w->owner);
457 up_read(&smi_watchers_sem);
461 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
463 if (addr1->addr_type != addr2->addr_type)
466 if (addr1->channel != addr2->channel)
469 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
470 struct ipmi_system_interface_addr *smi_addr1
471 = (struct ipmi_system_interface_addr *) addr1;
472 struct ipmi_system_interface_addr *smi_addr2
473 = (struct ipmi_system_interface_addr *) addr2;
474 return (smi_addr1->lun == smi_addr2->lun);
477 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
478 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
480 struct ipmi_ipmb_addr *ipmb_addr1
481 = (struct ipmi_ipmb_addr *) addr1;
482 struct ipmi_ipmb_addr *ipmb_addr2
483 = (struct ipmi_ipmb_addr *) addr2;
485 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
486 && (ipmb_addr1->lun == ipmb_addr2->lun));
489 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
490 struct ipmi_lan_addr *lan_addr1
491 = (struct ipmi_lan_addr *) addr1;
492 struct ipmi_lan_addr *lan_addr2
493 = (struct ipmi_lan_addr *) addr2;
495 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
496 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
497 && (lan_addr1->session_handle
498 == lan_addr2->session_handle)
499 && (lan_addr1->lun == lan_addr2->lun));
505 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
507 if (len < sizeof(struct ipmi_system_interface_addr)) {
511 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
512 if (addr->channel != IPMI_BMC_CHANNEL)
517 if ((addr->channel == IPMI_BMC_CHANNEL)
518 || (addr->channel >= IPMI_MAX_CHANNELS)
519 || (addr->channel < 0))
522 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
523 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
525 if (len < sizeof(struct ipmi_ipmb_addr)) {
531 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
532 if (len < sizeof(struct ipmi_lan_addr)) {
541 unsigned int ipmi_addr_length(int addr_type)
543 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
544 return sizeof(struct ipmi_system_interface_addr);
546 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
547 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
549 return sizeof(struct ipmi_ipmb_addr);
552 if (addr_type == IPMI_LAN_ADDR_TYPE)
553 return sizeof(struct ipmi_lan_addr);
558 static void deliver_response(struct ipmi_recv_msg *msg)
561 ipmi_smi_t intf = msg->user_msg_data;
564 /* Special handling for NULL users. */
565 if (intf->null_user_handler) {
566 intf->null_user_handler(intf, msg);
567 spin_lock_irqsave(&intf->counter_lock, flags);
568 intf->handled_local_responses++;
569 spin_unlock_irqrestore(&intf->counter_lock, flags);
571 /* No handler, so give up. */
572 spin_lock_irqsave(&intf->counter_lock, flags);
573 intf->unhandled_local_responses++;
574 spin_unlock_irqrestore(&intf->counter_lock, flags);
576 ipmi_free_recv_msg(msg);
578 ipmi_user_t user = msg->user;
579 user->handler->ipmi_recv_hndl(msg, user->handler_data);
583 /* Find the next sequence number not being used and add the given
584 message with the given timeout to the sequence table. This must be
585 called with the interface's seq_lock held. */
586 static int intf_next_seq(ipmi_smi_t intf,
587 struct ipmi_recv_msg *recv_msg,
588 unsigned long timeout,
597 for (i = intf->curr_seq;
598 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
599 i = (i+1)%IPMI_IPMB_NUM_SEQ)
601 if (!intf->seq_table[i].inuse)
605 if (!intf->seq_table[i].inuse) {
606 intf->seq_table[i].recv_msg = recv_msg;
608 /* Start with the maximum timeout, when the send response
609 comes in we will start the real timer. */
610 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
611 intf->seq_table[i].orig_timeout = timeout;
612 intf->seq_table[i].retries_left = retries;
613 intf->seq_table[i].broadcast = broadcast;
614 intf->seq_table[i].inuse = 1;
615 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
617 *seqid = intf->seq_table[i].seqid;
618 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
626 /* Return the receive message for the given sequence number and
627 release the sequence number so it can be reused. Some other data
628 is passed in to be sure the message matches up correctly (to help
629 guard against message coming in after their timeout and the
630 sequence number being reused). */
631 static int intf_find_seq(ipmi_smi_t intf,
636 struct ipmi_addr *addr,
637 struct ipmi_recv_msg **recv_msg)
642 if (seq >= IPMI_IPMB_NUM_SEQ)
645 spin_lock_irqsave(&(intf->seq_lock), flags);
646 if (intf->seq_table[seq].inuse) {
647 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
649 if ((msg->addr.channel == channel)
650 && (msg->msg.cmd == cmd)
651 && (msg->msg.netfn == netfn)
652 && (ipmi_addr_equal(addr, &(msg->addr))))
655 intf->seq_table[seq].inuse = 0;
659 spin_unlock_irqrestore(&(intf->seq_lock), flags);
665 /* Start the timer for a specific sequence table entry. */
666 static int intf_start_seq_timer(ipmi_smi_t intf,
675 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
677 spin_lock_irqsave(&(intf->seq_lock), flags);
678 /* We do this verification because the user can be deleted
679 while a message is outstanding. */
680 if ((intf->seq_table[seq].inuse)
681 && (intf->seq_table[seq].seqid == seqid))
683 struct seq_table *ent = &(intf->seq_table[seq]);
684 ent->timeout = ent->orig_timeout;
687 spin_unlock_irqrestore(&(intf->seq_lock), flags);
692 /* Got an error for the send message for a specific sequence number. */
693 static int intf_err_seq(ipmi_smi_t intf,
701 struct ipmi_recv_msg *msg = NULL;
704 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
706 spin_lock_irqsave(&(intf->seq_lock), flags);
707 /* We do this verification because the user can be deleted
708 while a message is outstanding. */
709 if ((intf->seq_table[seq].inuse)
710 && (intf->seq_table[seq].seqid == seqid))
712 struct seq_table *ent = &(intf->seq_table[seq]);
718 spin_unlock_irqrestore(&(intf->seq_lock), flags);
721 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
722 msg->msg_data[0] = err;
723 msg->msg.netfn |= 1; /* Convert to a response. */
724 msg->msg.data_len = 1;
725 msg->msg.data = msg->msg_data;
726 deliver_response(msg);
733 int ipmi_create_user(unsigned int if_num,
734 struct ipmi_user_hndl *handler,
739 ipmi_user_t new_user;
743 /* There is no module usecount here, because it's not
744 required. Since this can only be used by and called from
745 other modules, they will implicitly use this module, and
746 thus this can't be removed unless the other modules are
752 /* Make sure the driver is actually initialized, this handles
753 problems with initialization order. */
755 rv = ipmi_init_msghandler();
759 /* The init code doesn't return an error if it was turned
760 off, but it won't initialize. Check that. */
765 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
769 spin_lock_irqsave(&interfaces_lock, flags);
770 intf = ipmi_interfaces[if_num];
771 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
772 spin_unlock_irqrestore(&interfaces_lock, flags);
777 /* Note that each existing user holds a refcount to the interface. */
778 kref_get(&intf->refcount);
779 spin_unlock_irqrestore(&interfaces_lock, flags);
781 kref_init(&new_user->refcount);
782 new_user->handler = handler;
783 new_user->handler_data = handler_data;
784 new_user->intf = intf;
785 new_user->gets_events = 0;
787 if (!try_module_get(intf->handlers->owner)) {
792 if (intf->handlers->inc_usecount) {
793 rv = intf->handlers->inc_usecount(intf->send_info);
795 module_put(intf->handlers->owner);
801 spin_lock_irqsave(&intf->seq_lock, flags);
802 list_add_rcu(&new_user->link, &intf->users);
803 spin_unlock_irqrestore(&intf->seq_lock, flags);
808 kref_put(&intf->refcount, intf_free);
814 static void free_user(struct kref *ref)
816 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
820 int ipmi_destroy_user(ipmi_user_t user)
822 ipmi_smi_t intf = user->intf;
825 struct cmd_rcvr *rcvr;
826 struct cmd_rcvr *rcvrs = NULL;
830 /* Remove the user from the interface's sequence table. */
831 spin_lock_irqsave(&intf->seq_lock, flags);
832 list_del_rcu(&user->link);
834 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
835 if (intf->seq_table[i].inuse
836 && (intf->seq_table[i].recv_msg->user == user))
838 intf->seq_table[i].inuse = 0;
841 spin_unlock_irqrestore(&intf->seq_lock, flags);
844 * Remove the user from the command receiver's table. First
845 * we build a list of everything (not using the standard link,
846 * since other things may be using it till we do
847 * synchronize_rcu()) then free everything in that list.
849 mutex_lock(&intf->cmd_rcvrs_mutex);
850 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
851 if (rcvr->user == user) {
852 list_del_rcu(&rcvr->link);
857 mutex_unlock(&intf->cmd_rcvrs_mutex);
865 module_put(intf->handlers->owner);
866 if (intf->handlers->dec_usecount)
867 intf->handlers->dec_usecount(intf->send_info);
869 kref_put(&intf->refcount, intf_free);
871 kref_put(&user->refcount, free_user);
876 void ipmi_get_version(ipmi_user_t user,
877 unsigned char *major,
878 unsigned char *minor)
880 *major = ipmi_version_major(&user->intf->bmc->id);
881 *minor = ipmi_version_minor(&user->intf->bmc->id);
884 int ipmi_set_my_address(ipmi_user_t user,
885 unsigned int channel,
886 unsigned char address)
888 if (channel >= IPMI_MAX_CHANNELS)
890 user->intf->channels[channel].address = address;
894 int ipmi_get_my_address(ipmi_user_t user,
895 unsigned int channel,
896 unsigned char *address)
898 if (channel >= IPMI_MAX_CHANNELS)
900 *address = user->intf->channels[channel].address;
904 int ipmi_set_my_LUN(ipmi_user_t user,
905 unsigned int channel,
908 if (channel >= IPMI_MAX_CHANNELS)
910 user->intf->channels[channel].lun = LUN & 0x3;
914 int ipmi_get_my_LUN(ipmi_user_t user,
915 unsigned int channel,
916 unsigned char *address)
918 if (channel >= IPMI_MAX_CHANNELS)
920 *address = user->intf->channels[channel].lun;
924 int ipmi_set_gets_events(ipmi_user_t user, int val)
927 ipmi_smi_t intf = user->intf;
928 struct ipmi_recv_msg *msg, *msg2;
929 struct list_head msgs;
931 INIT_LIST_HEAD(&msgs);
933 spin_lock_irqsave(&intf->events_lock, flags);
934 user->gets_events = val;
937 /* Deliver any queued events. */
938 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
939 list_move_tail(&msg->link, &msgs);
940 intf->waiting_events_count = 0;
943 /* Hold the events lock while doing this to preserve order. */
944 list_for_each_entry_safe(msg, msg2, &msgs, link) {
946 kref_get(&user->refcount);
947 deliver_response(msg);
950 spin_unlock_irqrestore(&intf->events_lock, flags);
955 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
960 struct cmd_rcvr *rcvr;
962 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
963 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
964 && (rcvr->chans & (1 << chan)))
970 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
975 struct cmd_rcvr *rcvr;
977 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
978 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
979 && (rcvr->chans & chans))
985 int ipmi_register_for_cmd(ipmi_user_t user,
990 ipmi_smi_t intf = user->intf;
991 struct cmd_rcvr *rcvr;
995 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1000 rcvr->chans = chans;
1003 mutex_lock(&intf->cmd_rcvrs_mutex);
1004 /* Make sure the command/netfn is not already registered. */
1005 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1010 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1013 mutex_unlock(&intf->cmd_rcvrs_mutex);
1020 int ipmi_unregister_for_cmd(ipmi_user_t user,
1021 unsigned char netfn,
1025 ipmi_smi_t intf = user->intf;
1026 struct cmd_rcvr *rcvr;
1027 struct cmd_rcvr *rcvrs = NULL;
1028 int i, rv = -ENOENT;
1030 mutex_lock(&intf->cmd_rcvrs_mutex);
1031 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1032 if (((1 << i) & chans) == 0)
1034 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1037 if (rcvr->user == user) {
1039 rcvr->chans &= ~chans;
1040 if (rcvr->chans == 0) {
1041 list_del_rcu(&rcvr->link);
1047 mutex_unlock(&intf->cmd_rcvrs_mutex);
1057 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1059 ipmi_smi_t intf = user->intf;
1060 intf->handlers->set_run_to_completion(intf->send_info, val);
/*
 * Compute the IPMB 2's-complement checksum of @size bytes at @data:
 * the value such that (sum of data bytes + checksum) mod 256 == 0.
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1074 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1075 struct kernel_ipmi_msg *msg,
1076 struct ipmi_ipmb_addr *ipmb_addr,
1078 unsigned char ipmb_seq,
1080 unsigned char source_address,
1081 unsigned char source_lun)
1085 /* Format the IPMB header data. */
1086 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1087 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1088 smi_msg->data[2] = ipmb_addr->channel;
1090 smi_msg->data[3] = 0;
1091 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1092 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1093 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1094 smi_msg->data[i+6] = source_address;
1095 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1096 smi_msg->data[i+8] = msg->cmd;
1098 /* Now tack on the data to the message. */
1099 if (msg->data_len > 0)
1100 memcpy(&(smi_msg->data[i+9]), msg->data,
1102 smi_msg->data_size = msg->data_len + 9;
1104 /* Now calculate the checksum and tack it on. */
1105 smi_msg->data[i+smi_msg->data_size]
1106 = ipmb_checksum(&(smi_msg->data[i+6]),
1107 smi_msg->data_size-6);
1109 /* Add on the checksum size and the offset from the
1111 smi_msg->data_size += 1 + i;
1113 smi_msg->msgid = msgid;
1116 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1117 struct kernel_ipmi_msg *msg,
1118 struct ipmi_lan_addr *lan_addr,
1120 unsigned char ipmb_seq,
1121 unsigned char source_lun)
1123 /* Format the IPMB header data. */
1124 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1125 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1126 smi_msg->data[2] = lan_addr->channel;
1127 smi_msg->data[3] = lan_addr->session_handle;
1128 smi_msg->data[4] = lan_addr->remote_SWID;
1129 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1130 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1131 smi_msg->data[7] = lan_addr->local_SWID;
1132 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1133 smi_msg->data[9] = msg->cmd;
1135 /* Now tack on the data to the message. */
1136 if (msg->data_len > 0)
1137 memcpy(&(smi_msg->data[10]), msg->data,
1139 smi_msg->data_size = msg->data_len + 10;
1141 /* Now calculate the checksum and tack it on. */
1142 smi_msg->data[smi_msg->data_size]
1143 = ipmb_checksum(&(smi_msg->data[7]),
1144 smi_msg->data_size-7);
1146 /* Add on the checksum size and the offset from the
1148 smi_msg->data_size += 1;
1150 smi_msg->msgid = msgid;
1153 /* Separate from ipmi_request so that the user does not have to be
1154 supplied in certain circumstances (mainly at panic time). If
1155 messages are supplied, they will be freed, even if an error
1157 static int i_ipmi_request(ipmi_user_t user,
1159 struct ipmi_addr *addr,
1161 struct kernel_ipmi_msg *msg,
1162 void *user_msg_data,
1164 struct ipmi_recv_msg *supplied_recv,
1166 unsigned char source_address,
1167 unsigned char source_lun,
1169 unsigned int retry_time_ms)
1172 struct ipmi_smi_msg *smi_msg;
1173 struct ipmi_recv_msg *recv_msg;
1174 unsigned long flags;
1177 if (supplied_recv) {
1178 recv_msg = supplied_recv;
1180 recv_msg = ipmi_alloc_recv_msg();
1181 if (recv_msg == NULL) {
1185 recv_msg->user_msg_data = user_msg_data;
1188 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1190 smi_msg = ipmi_alloc_smi_msg();
1191 if (smi_msg == NULL) {
1192 ipmi_free_recv_msg(recv_msg);
1197 recv_msg->user = user;
1199 kref_get(&user->refcount);
1200 recv_msg->msgid = msgid;
1201 /* Store the message to send in the receive message so timeout
1202 responses can get the proper response data. */
1203 recv_msg->msg = *msg;
1205 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1206 struct ipmi_system_interface_addr *smi_addr;
1208 if (msg->netfn & 1) {
1209 /* Responses are not allowed to the SMI. */
1214 smi_addr = (struct ipmi_system_interface_addr *) addr;
1215 if (smi_addr->lun > 3) {
1216 spin_lock_irqsave(&intf->counter_lock, flags);
1217 intf->sent_invalid_commands++;
1218 spin_unlock_irqrestore(&intf->counter_lock, flags);
1223 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1225 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1226 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1227 || (msg->cmd == IPMI_GET_MSG_CMD)
1228 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1230 /* We don't let the user do these, since we manage
1231 the sequence numbers. */
1232 spin_lock_irqsave(&intf->counter_lock, flags);
1233 intf->sent_invalid_commands++;
1234 spin_unlock_irqrestore(&intf->counter_lock, flags);
1239 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1240 spin_lock_irqsave(&intf->counter_lock, flags);
1241 intf->sent_invalid_commands++;
1242 spin_unlock_irqrestore(&intf->counter_lock, flags);
1247 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1248 smi_msg->data[1] = msg->cmd;
1249 smi_msg->msgid = msgid;
1250 smi_msg->user_data = recv_msg;
1251 if (msg->data_len > 0)
1252 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1253 smi_msg->data_size = msg->data_len + 2;
1254 spin_lock_irqsave(&intf->counter_lock, flags);
1255 intf->sent_local_commands++;
1256 spin_unlock_irqrestore(&intf->counter_lock, flags);
1257 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1258 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1260 struct ipmi_ipmb_addr *ipmb_addr;
1261 unsigned char ipmb_seq;
1265 if (addr->channel >= IPMI_MAX_CHANNELS) {
1266 spin_lock_irqsave(&intf->counter_lock, flags);
1267 intf->sent_invalid_commands++;
1268 spin_unlock_irqrestore(&intf->counter_lock, flags);
1273 if (intf->channels[addr->channel].medium
1274 != IPMI_CHANNEL_MEDIUM_IPMB)
1276 spin_lock_irqsave(&intf->counter_lock, flags);
1277 intf->sent_invalid_commands++;
1278 spin_unlock_irqrestore(&intf->counter_lock, flags);
1284 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1285 retries = 0; /* Don't retry broadcasts. */
1289 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1290 /* Broadcasts add a zero at the beginning of the
1291 message, but otherwise is the same as an IPMB
1293 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1298 /* Default to 1 second retries. */
1299 if (retry_time_ms == 0)
1300 retry_time_ms = 1000;
1302 /* 9 for the header and 1 for the checksum, plus
1303 possibly one for the broadcast. */
1304 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1305 spin_lock_irqsave(&intf->counter_lock, flags);
1306 intf->sent_invalid_commands++;
1307 spin_unlock_irqrestore(&intf->counter_lock, flags);
1312 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1313 if (ipmb_addr->lun > 3) {
1314 spin_lock_irqsave(&intf->counter_lock, flags);
1315 intf->sent_invalid_commands++;
1316 spin_unlock_irqrestore(&intf->counter_lock, flags);
1321 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1323 if (recv_msg->msg.netfn & 0x1) {
1324 /* It's a response, so use the user's sequence
1326 spin_lock_irqsave(&intf->counter_lock, flags);
1327 intf->sent_ipmb_responses++;
1328 spin_unlock_irqrestore(&intf->counter_lock, flags);
1329 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1331 source_address, source_lun);
1333 /* Save the receive message so we can use it
1334 to deliver the response. */
1335 smi_msg->user_data = recv_msg;
1337 /* It's a command, so get a sequence for it. */
1339 spin_lock_irqsave(&(intf->seq_lock), flags);
1341 spin_lock(&intf->counter_lock);
1342 intf->sent_ipmb_commands++;
1343 spin_unlock(&intf->counter_lock);
1345 /* Create a sequence number with a 1 second
1346 timeout and 4 retries. */
1347 rv = intf_next_seq(intf,
1355 /* We have used up all the sequence numbers,
1356 probably, so abort. */
1357 spin_unlock_irqrestore(&(intf->seq_lock),
1362 /* Store the sequence number in the message,
1363 so that when the send message response
1364 comes back we can start the timer. */
1365 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1366 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1367 ipmb_seq, broadcast,
1368 source_address, source_lun);
1370 /* Copy the message into the recv message data, so we
1371 can retransmit it later if necessary. */
1372 memcpy(recv_msg->msg_data, smi_msg->data,
1373 smi_msg->data_size);
1374 recv_msg->msg.data = recv_msg->msg_data;
1375 recv_msg->msg.data_len = smi_msg->data_size;
1377 /* We don't unlock until here, because we need
1378 to copy the completed message into the
1379 recv_msg before we release the lock.
1380 Otherwise, race conditions may bite us. I
1381 know that's pretty paranoid, but I prefer
1383 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1385 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1386 struct ipmi_lan_addr *lan_addr;
1387 unsigned char ipmb_seq;
1390 if (addr->channel >= IPMI_MAX_CHANNELS) {
1391 spin_lock_irqsave(&intf->counter_lock, flags);
1392 intf->sent_invalid_commands++;
1393 spin_unlock_irqrestore(&intf->counter_lock, flags);
1398 if ((intf->channels[addr->channel].medium
1399 != IPMI_CHANNEL_MEDIUM_8023LAN)
1400 && (intf->channels[addr->channel].medium
1401 != IPMI_CHANNEL_MEDIUM_ASYNC))
1403 spin_lock_irqsave(&intf->counter_lock, flags);
1404 intf->sent_invalid_commands++;
1405 spin_unlock_irqrestore(&intf->counter_lock, flags);
1412 /* Default to 1 second retries. */
1413 if (retry_time_ms == 0)
1414 retry_time_ms = 1000;
1416 /* 11 for the header and 1 for the checksum. */
1417 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1418 spin_lock_irqsave(&intf->counter_lock, flags);
1419 intf->sent_invalid_commands++;
1420 spin_unlock_irqrestore(&intf->counter_lock, flags);
1425 lan_addr = (struct ipmi_lan_addr *) addr;
1426 if (lan_addr->lun > 3) {
1427 spin_lock_irqsave(&intf->counter_lock, flags);
1428 intf->sent_invalid_commands++;
1429 spin_unlock_irqrestore(&intf->counter_lock, flags);
1434 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1436 if (recv_msg->msg.netfn & 0x1) {
1437 /* It's a response, so use the user's sequence
1439 spin_lock_irqsave(&intf->counter_lock, flags);
1440 intf->sent_lan_responses++;
1441 spin_unlock_irqrestore(&intf->counter_lock, flags);
1442 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1445 /* Save the receive message so we can use it
1446 to deliver the response. */
1447 smi_msg->user_data = recv_msg;
1449 /* It's a command, so get a sequence for it. */
1451 spin_lock_irqsave(&(intf->seq_lock), flags);
1453 spin_lock(&intf->counter_lock);
1454 intf->sent_lan_commands++;
1455 spin_unlock(&intf->counter_lock);
1457 /* Create a sequence number with a 1 second
1458 timeout and 4 retries. */
1459 rv = intf_next_seq(intf,
1467 /* We have used up all the sequence numbers,
1468 probably, so abort. */
1469 spin_unlock_irqrestore(&(intf->seq_lock),
1474 /* Store the sequence number in the message,
1475 so that when the send message response
1476 comes back we can start the timer. */
1477 format_lan_msg(smi_msg, msg, lan_addr,
1478 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1479 ipmb_seq, source_lun);
1481 /* Copy the message into the recv message data, so we
1482 can retransmit it later if necessary. */
1483 memcpy(recv_msg->msg_data, smi_msg->data,
1484 smi_msg->data_size);
1485 recv_msg->msg.data = recv_msg->msg_data;
1486 recv_msg->msg.data_len = smi_msg->data_size;
1488 /* We don't unlock until here, because we need
1489 to copy the completed message into the
1490 recv_msg before we release the lock.
1491 Otherwise, race conditions may bite us. I
1492 know that's pretty paranoid, but I prefer
1494 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1497 /* Unknown address type. */
1498 spin_lock_irqsave(&intf->counter_lock, flags);
1499 intf->sent_invalid_commands++;
1500 spin_unlock_irqrestore(&intf->counter_lock, flags);
1508 for (m = 0; m < smi_msg->data_size; m++)
1509 printk(" %2.2x", smi_msg->data[m]);
1513 intf->handlers->sender(intf->send_info, smi_msg, priority);
1518 ipmi_free_smi_msg(smi_msg);
1519 ipmi_free_recv_msg(recv_msg);
/*
 * Validate the channel in *addr and return, through *saddr and *lun,
 * the slave address and LUN configured for that channel on this
 * interface.  Fails when addr->channel is out of range.
 */
1523 static int check_addr(ipmi_smi_t intf,
1524 struct ipmi_addr *addr,
1525 unsigned char *saddr,
/* Channel must be a valid index into intf->channels[]. */
1528 if (addr->channel >= IPMI_MAX_CHANNELS)
1530 *lun = intf->channels[addr->channel].lun;
1531 *saddr = intf->channels[addr->channel].address;
/*
 * Public request entry point that lets the caller specify the retry
 * count and retry interval (retry_time_ms).  Resolves the source slave
 * address/LUN for the target channel and forwards everything to
 * i_ipmi_request().
 */
1535 int ipmi_request_settime(ipmi_user_t user,
1536 struct ipmi_addr *addr,
1538 struct kernel_ipmi_msg *msg,
1539 void *user_msg_data,
1542 unsigned int retry_time_ms)
1544 unsigned char saddr, lun;
/* Validate the address and pick up per-channel source addr/LUN. */
1549 rv = check_addr(user->intf, addr, &saddr, &lun);
1552 return i_ipmi_request(user,
/*
 * Public request entry point for callers that pre-allocate the message
 * buffers (supplied_recv and the SMI message), avoiding allocation in
 * the send path.  Address validation and the actual send are shared
 * with the other entry points via check_addr()/i_ipmi_request().
 */
1566 int ipmi_request_supply_msgs(ipmi_user_t user,
1567 struct ipmi_addr *addr,
1569 struct kernel_ipmi_msg *msg,
1570 void *user_msg_data,
1572 struct ipmi_recv_msg *supplied_recv,
1575 unsigned char saddr, lun;
1580 rv = check_addr(user->intf, addr, &saddr, &lun);
1583 return i_ipmi_request(user,
/*
 * /proc read handler: print the IPMB slave address configured for each
 * channel, space-separated in hex, terminated by a newline.
 * NOTE(review): off/count are not honored; presumably safe because the
 * output is far smaller than a page — confirm.
 */
1597 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1598 int count, int *eof, void *data)
1600 char *out = (char *) page;
1601 ipmi_smi_t intf = data;
1605 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1606 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1607 out[rv-1] = '\n'; /* Replace the final space with a newline */
/*
 * /proc read handler: print the BMC's IPMI version as "major.minor",
 * taken from the cached Get Device ID response in intf->bmc->id.
 */
1613 static int version_file_read_proc(char *page, char **start, off_t off,
1614 int count, int *eof, void *data)
1616 char *out = (char *) page;
1617 ipmi_smi_t intf = data;
1619 return sprintf(out, "%d.%d\n",
1620 ipmi_version_major(&intf->bmc->id),
1621 ipmi_version_minor(&intf->bmc->id));
/*
 * /proc read handler: dump every per-interface message counter, one
 * "name: value" pair per line.  The counters are written under
 * intf->counter_lock elsewhere but read unlocked here (a stale read of
 * a statistics value is harmless).
 * NOTE(review): off/count are ignored; output must fit in one page —
 * confirm against PAGE_SIZE if counters are added.
 */
1624 static int stat_file_read_proc(char *page, char **start, off_t off,
1625 int count, int *eof, void *data)
1627 char *out = (char *) page;
1628 ipmi_smi_t intf = data;
1630 out += sprintf(out, "sent_invalid_commands: %d\n",
1631 intf->sent_invalid_commands);
1632 out += sprintf(out, "sent_local_commands: %d\n",
1633 intf->sent_local_commands);
1634 out += sprintf(out, "handled_local_responses: %d\n",
1635 intf->handled_local_responses);
1636 out += sprintf(out, "unhandled_local_responses: %d\n",
1637 intf->unhandled_local_responses);
1638 out += sprintf(out, "sent_ipmb_commands: %d\n",
1639 intf->sent_ipmb_commands);
1640 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1641 intf->sent_ipmb_command_errs);
1642 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1643 intf->retransmitted_ipmb_commands);
1644 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1645 intf->timed_out_ipmb_commands);
1646 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1647 intf->timed_out_ipmb_broadcasts);
1648 out += sprintf(out, "sent_ipmb_responses: %d\n",
1649 intf->sent_ipmb_responses);
1650 out += sprintf(out, "handled_ipmb_responses: %d\n",
1651 intf->handled_ipmb_responses);
1652 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1653 intf->invalid_ipmb_responses);
1654 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1655 intf->unhandled_ipmb_responses);
1656 out += sprintf(out, "sent_lan_commands: %d\n",
1657 intf->sent_lan_commands);
1658 out += sprintf(out, "sent_lan_command_errs: %d\n",
1659 intf->sent_lan_command_errs);
1660 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1661 intf->retransmitted_lan_commands);
1662 out += sprintf(out, "timed_out_lan_commands: %d\n",
1663 intf->timed_out_lan_commands);
1664 out += sprintf(out, "sent_lan_responses: %d\n",
1665 intf->sent_lan_responses);
1666 out += sprintf(out, "handled_lan_responses: %d\n",
1667 intf->handled_lan_responses);
1668 out += sprintf(out, "invalid_lan_responses: %d\n",
1669 intf->invalid_lan_responses);
1670 out += sprintf(out, "unhandled_lan_responses: %d\n",
1671 intf->unhandled_lan_responses);
1672 out += sprintf(out, "handled_commands: %d\n",
1673 intf->handled_commands);
1674 out += sprintf(out, "invalid_commands: %d\n",
1675 intf->invalid_commands);
1676 out += sprintf(out, "unhandled_commands: %d\n",
1677 intf->unhandled_commands);
1678 out += sprintf(out, "invalid_events: %d\n",
1679 intf->invalid_events);
1680 out += sprintf(out, "events: %d\n",
/* Return the number of bytes written into the page. */
1683 return (out - ((char *) page));
/*
 * Create a proc file under the interface's proc directory and remember
 * it on smi->proc_entries (under proc_entry_lock) so that
 * remove_proc_entries() can tear it down later.  A no-op when
 * CONFIG_PROC_FS is off.  Returns 0 on success, negative errno on
 * allocation/creation failure (error paths elided in this view).
 */
1686 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1687 read_proc_t *read_proc, write_proc_t *write_proc,
1688 void *data, struct module *owner)
1691 #ifdef CONFIG_PROC_FS
1692 struct proc_dir_entry *file;
1693 struct ipmi_proc_entry *entry;
1695 /* Create a list element. */
1696 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
/* Copy the name; the caller's string may not outlive the entry. */
1699 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1704 strcpy(entry->name, name);
1706 file = create_proc_entry(name, 0, smi->proc_dir);
1714 file->read_proc = read_proc;
1715 file->write_proc = write_proc;
1716 file->owner = owner;
/* proc_entry_lock protects the smi->proc_entries singly-linked list. */
1718 spin_lock(&smi->proc_entry_lock);
1719 /* Stick it on the list. */
1720 entry->next = smi->proc_entries;
1721 smi->proc_entries = entry;
1722 spin_unlock(&smi->proc_entry_lock);
1724 #endif /* CONFIG_PROC_FS */
/*
 * Create /proc/ipmi/<num>/ with the "stats", "ipmb" and "version"
 * read-only files for a newly registered interface.  Each file creation
 * is attempted only if the previous one succeeded (error checks elided
 * in this view).
 */
1729 static int add_proc_entries(ipmi_smi_t smi, int num)
1733 #ifdef CONFIG_PROC_FS
/* Directory name is the interface number in decimal. */
1734 sprintf(smi->proc_dir_name, "%d", num);
1735 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1739 smi->proc_dir->owner = THIS_MODULE;
1743 rv = ipmi_smi_add_proc_entry(smi, "stats",
1744 stat_file_read_proc, NULL,
1748 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1749 ipmb_file_read_proc, NULL,
1753 rv = ipmi_smi_add_proc_entry(smi, "version",
1754 version_file_read_proc, NULL,
1756 #endif /* CONFIG_PROC_FS */
/*
 * Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry()
 * created: pop each tracked entry off smi->proc_entries under
 * proc_entry_lock, remove its proc file, then remove the interface's
 * proc directory itself.  (The kfree of entry->name/entry is in lines
 * elided from this view — presumably present; verify.)
 */
1761 static void remove_proc_entries(ipmi_smi_t smi)
1763 #ifdef CONFIG_PROC_FS
1764 struct ipmi_proc_entry *entry;
1766 spin_lock(&smi->proc_entry_lock);
1767 while (smi->proc_entries) {
1768 entry = smi->proc_entries;
1769 smi->proc_entries = entry->next;
1771 remove_proc_entry(entry->name, smi->proc_dir);
1775 spin_unlock(&smi->proc_entry_lock);
1776 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1777 #endif /* CONFIG_PROC_FS */
/*
 * driver_find_device() match callback: true when the bmc_device bound
 * to dev has the 16-byte GUID passed in data.
 */
1780 static int __find_bmc_guid(struct device *dev, void *data)
1782 unsigned char *id = data;
1783 struct bmc_device *bmc = dev_get_drvdata(dev);
1784 return memcmp(bmc->guid, id, 16) == 0;
/*
 * Look up an already-registered bmc_device by GUID on the given driver.
 * Returns the bmc_device, or NULL when no device matches (the NULL
 * check on dev is in a line elided from this view).
 */
1787 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1788 unsigned char *guid)
1792 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1794 return dev_get_drvdata(dev);
/*
 * Key used to match a BMC by its (product_id, device_id) pair when no
 * GUID is available; passed as the data argument to
 * __find_bmc_prod_dev_id().
 */
1799 struct prod_dev_id {
1800 unsigned int product_id;
1801 unsigned char device_id;
1804 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1806 struct prod_dev_id *id = data;
1807 struct bmc_device *bmc = dev_get_drvdata(dev);
1809 return (bmc->id.product_id == id->product_id
1810 && bmc->id.product_id == id->product_id
1811 && bmc->id.device_id == id->device_id);
/*
 * Look up an already-registered bmc_device by (product_id, device_id)
 * on the given driver.  Returns the bmc_device, or NULL when nothing
 * matches (NULL check elided from this view).
 * NOTE(review): the product_id parameter is unsigned char but the
 * prod_dev_id field (and bmc->id.product_id) is unsigned int — product
 * ids above 255 are truncated here; confirm callers and consider
 * widening the parameter.
 */
1814 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1815 struct device_driver *drv,
1816 unsigned char product_id, unsigned char device_id)
1818 struct prod_dev_id id = {
1819 .product_id = product_id,
1820 .device_id = device_id,
1824 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1826 return dev_get_drvdata(dev);
/*
 * sysfs show: the BMC's device ID byte from the cached Get Device ID
 * response, printed in decimal.
 */
1831 static ssize_t device_id_show(struct device *dev,
1832 struct device_attribute *attr,
1835 struct bmc_device *bmc = dev_get_drvdata(dev);
1837 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1840 static ssize_t provides_dev_sdrs_show(struct device *dev,
1841 struct device_attribute *attr,
1844 struct bmc_device *bmc = dev_get_drvdata(dev);
1846 return snprintf(buf, 10, "%u\n",
1847 bmc->id.device_revision && 0x80 >> 7);
1850 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1853 struct bmc_device *bmc = dev_get_drvdata(dev);
1855 return snprintf(buf, 20, "%u\n",
1856 bmc->id.device_revision && 0x0F);
/*
 * sysfs show: firmware revision as "major.minor" — major in decimal,
 * minor printed in hex (%x), matching the BCD-style encoding of the
 * second revision byte.
 */
1859 static ssize_t firmware_rev_show(struct device *dev,
1860 struct device_attribute *attr,
1863 struct bmc_device *bmc = dev_get_drvdata(dev);
1865 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1866 bmc->id.firmware_revision_2);
/*
 * sysfs show: the IPMI specification version supported by the BMC, as
 * "major.minor" extracted from the cached device id.
 */
1869 static ssize_t ipmi_version_show(struct device *dev,
1870 struct device_attribute *attr,
1873 struct bmc_device *bmc = dev_get_drvdata(dev);
1875 return snprintf(buf, 20, "%u.%u\n",
1876 ipmi_version_major(&bmc->id),
1877 ipmi_version_minor(&bmc->id));
/*
 * sysfs show: the "additional device support" bitmask byte from the
 * Get Device ID response, printed as a two-digit hex value.
 */
1880 static ssize_t add_dev_support_show(struct device *dev,
1881 struct device_attribute *attr,
1884 struct bmc_device *bmc = dev_get_drvdata(dev);
1886 return snprintf(buf, 10, "0x%02x\n",
1887 bmc->id.additional_device_support);
/*
 * sysfs show: the 20-bit IANA manufacturer id, printed as a six-digit
 * hex value.
 */
1890 static ssize_t manufacturer_id_show(struct device *dev,
1891 struct device_attribute *attr,
1894 struct bmc_device *bmc = dev_get_drvdata(dev);
1896 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/*
 * sysfs show: the 16-bit product id from the Get Device ID response,
 * printed as a four-digit hex value.
 */
1899 static ssize_t product_id_show(struct device *dev,
1900 struct device_attribute *attr,
1903 struct bmc_device *bmc = dev_get_drvdata(dev);
1905 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/*
 * sysfs show: the four auxiliary firmware revision bytes, printed
 * most-significant first (hence the reversed [3]..[0] index order).
 * Only created when id.aux_firmware_revision_set is true — see
 * ipmi_bmc_register()/cleanup_bmc_device().
 */
1908 static ssize_t aux_firmware_rev_show(struct device *dev,
1909 struct device_attribute *attr,
1912 struct bmc_device *bmc = dev_get_drvdata(dev);
1914 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1915 bmc->id.aux_firmware_revision[3],
1916 bmc->id.aux_firmware_revision[2],
1917 bmc->id.aux_firmware_revision[1],
1918 bmc->id.aux_firmware_revision[0]);
/*
 * sysfs show: the BMC GUID.
 * NOTE(review): this prints only guid[0] and guid[8] — two single
 * bytes cast to long long — not the two 8-byte halves of the 16-byte
 * GUID that the "%Lx%Lx" format suggests was intended.  Flagged for
 * confirmation before changing the user-visible format.
 */
1921 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1924 struct bmc_device *bmc = dev_get_drvdata(dev);
1926 return snprintf(buf, 100, "%Lx%Lx\n",
1927 (long long) bmc->guid[0],
1928 (long long) bmc->guid[8]);
/*
 * kref release callback for a bmc_device: remove every sysfs attribute
 * file that ipmi_bmc_register() created (the aux firmware revision file
 * only if it was created) and unregister the platform device.  Called
 * with ipmidriver_mutex held by the kref_put() callers.
 */
1932 cleanup_bmc_device(struct kref *ref)
1934 struct bmc_device *bmc;
1936 bmc = container_of(ref, struct bmc_device, refcount);
1938 device_remove_file(&bmc->dev->dev,
1939 &bmc->device_id_attr);
1940 device_remove_file(&bmc->dev->dev,
1941 &bmc->provides_dev_sdrs_attr);
1942 device_remove_file(&bmc->dev->dev,
1943 &bmc->revision_attr);
1944 device_remove_file(&bmc->dev->dev,
1945 &bmc->firmware_rev_attr);
1946 device_remove_file(&bmc->dev->dev,
1947 &bmc->version_attr);
1948 device_remove_file(&bmc->dev->dev,
1949 &bmc->add_dev_support_attr);
1950 device_remove_file(&bmc->dev->dev,
1951 &bmc->manufacturer_id_attr);
1952 device_remove_file(&bmc->dev->dev,
1953 &bmc->product_id_attr);
/* The aux firmware file is conditional — mirror of the create side. */
1954 if (bmc->id.aux_firmware_revision_set)
1955 device_remove_file(&bmc->dev->dev,
1956 &bmc->aux_firmware_rev_attr);
1958 device_remove_file(&bmc->dev->dev,
1960 platform_device_unregister(bmc->dev);
/*
 * Undo ipmi_bmc_register() for one interface: remove the two sysfs
 * cross-links (interface->bmc and bmc->interface), free the link name,
 * and drop the interface's reference on the bmc_device.  The bmc itself
 * is destroyed (cleanup_bmc_device) only when the last interface
 * sharing it goes away.
 */
1964 static void ipmi_bmc_unregister(ipmi_smi_t intf)
1966 struct bmc_device *bmc = intf->bmc;
1968 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1969 if (intf->my_dev_name) {
1970 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1971 kfree(intf->my_dev_name);
1972 intf->my_dev_name = NULL;
/* ipmidriver_mutex serializes bmc refcount drop against lookups. */
1975 mutex_lock(&ipmidriver_mutex);
1976 kref_put(&bmc->refcount, cleanup_bmc_device);
1977 mutex_unlock(&ipmidriver_mutex);
/*
 * Register the BMC behind an interface as a platform device with a set
 * of read-only sysfs attributes, or — if a bmc_device matching the same
 * GUID or (product_id, device_id) already exists — share that one by
 * taking a reference instead of creating a duplicate.  Also creates the
 * "bmc" symlink on the interface device and an "ipmiN" symlink back
 * from the BMC device.  Serialized by ipmidriver_mutex.
 */
1980 static int ipmi_bmc_register(ipmi_smi_t intf)
1983 struct bmc_device *bmc = intf->bmc;
1984 struct bmc_device *old_bmc;
1988 mutex_lock(&ipmidriver_mutex);
1991 * Try to find if there is an bmc_device struct
1992 * representing the interfaced BMC already
/* Prefer GUID matching; fall back to product/device id. */
1995 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
1997 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2002 * If there is already an bmc_device, free the new one,
2003 * otherwise register the new BMC device
2007 intf->bmc = old_bmc;
2010 kref_get(&bmc->refcount);
2011 mutex_unlock(&ipmidriver_mutex);
2014 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2015 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2016 bmc->id.manufacturer_id,
/* No existing BMC: allocate and register a fresh platform device. */
2020 bmc->dev = platform_device_alloc("ipmi_bmc",
2025 " Unable to allocate platform device\n");
2028 bmc->dev->dev.driver = &ipmidriver;
2029 dev_set_drvdata(&bmc->dev->dev, bmc);
2030 kref_init(&bmc->refcount);
2032 rv = platform_device_register(bmc->dev);
2033 mutex_unlock(&ipmidriver_mutex);
2037 " Unable to register bmc device: %d\n",
2039 /* Don't go to out_err, you can only do that if
2040 the device is registered already. */
/* Populate every sysfs attribute before creating the files. */
2044 bmc->device_id_attr.attr.name = "device_id";
2045 bmc->device_id_attr.attr.owner = THIS_MODULE;
2046 bmc->device_id_attr.attr.mode = S_IRUGO;
2047 bmc->device_id_attr.show = device_id_show;
2049 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2050 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2051 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2052 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2055 bmc->revision_attr.attr.name = "revision";
2056 bmc->revision_attr.attr.owner = THIS_MODULE;
2057 bmc->revision_attr.attr.mode = S_IRUGO;
2058 bmc->revision_attr.show = revision_show;
2060 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2061 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2062 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2063 bmc->firmware_rev_attr.show = firmware_rev_show;
2065 bmc->version_attr.attr.name = "ipmi_version";
2066 bmc->version_attr.attr.owner = THIS_MODULE;
2067 bmc->version_attr.attr.mode = S_IRUGO;
2068 bmc->version_attr.show = ipmi_version_show;
2070 bmc->add_dev_support_attr.attr.name
2071 = "additional_device_support";
2072 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2073 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2074 bmc->add_dev_support_attr.show = add_dev_support_show;
2076 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2077 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2078 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2079 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2081 bmc->product_id_attr.attr.name = "product_id";
2082 bmc->product_id_attr.attr.owner = THIS_MODULE;
2083 bmc->product_id_attr.attr.mode = S_IRUGO;
2084 bmc->product_id_attr.show = product_id_show;
2086 bmc->guid_attr.attr.name = "guid";
2087 bmc->guid_attr.attr.owner = THIS_MODULE;
2088 bmc->guid_attr.attr.mode = S_IRUGO;
2089 bmc->guid_attr.show = guid_show;
2091 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2092 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2093 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2094 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
/* NOTE(review): device_create_file return values are not checked. */
2096 device_create_file(&bmc->dev->dev,
2097 &bmc->device_id_attr);
2098 device_create_file(&bmc->dev->dev,
2099 &bmc->provides_dev_sdrs_attr);
2100 device_create_file(&bmc->dev->dev,
2101 &bmc->revision_attr);
2102 device_create_file(&bmc->dev->dev,
2103 &bmc->firmware_rev_attr);
2104 device_create_file(&bmc->dev->dev,
2105 &bmc->version_attr);
2106 device_create_file(&bmc->dev->dev,
2107 &bmc->add_dev_support_attr);
2108 device_create_file(&bmc->dev->dev,
2109 &bmc->manufacturer_id_attr);
2110 device_create_file(&bmc->dev->dev,
2111 &bmc->product_id_attr);
2112 if (bmc->id.aux_firmware_revision_set)
2113 device_create_file(&bmc->dev->dev,
2114 &bmc->aux_firmware_rev_attr);
2116 device_create_file(&bmc->dev->dev,
2120 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2121 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2122 bmc->id.manufacturer_id,
2128 * create symlink from system interface device to bmc device
2131 rv = sysfs_create_link(&intf->si_dev->kobj,
2132 &bmc->dev->dev.kobj, "bmc");
2135 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
/* Size the "ipmiN" name first, then allocate and format it. */
2140 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2141 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2142 if (!intf->my_dev_name) {
2145 "ipmi_msghandler: allocate link from BMC: %d\n",
2149 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2151 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2154 kfree(intf->my_dev_name);
2155 intf->my_dev_name = NULL;
2158 " Unable to create symlink to bmc: %d\n",
2166 ipmi_bmc_unregister(intf);
/*
 * Issue a Get Device GUID command to the BMC over the system interface
 * on behalf of no user (NULL); the response comes back through the
 * interface's null_user_handler (guid_handler).  Returns the
 * i_ipmi_request() result.
 */
2171 send_guid_cmd(ipmi_smi_t intf, int chan)
2173 struct kernel_ipmi_msg msg;
2174 struct ipmi_system_interface_addr si;
2176 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2177 si.channel = IPMI_BMC_CHANNEL;
2180 msg.netfn = IPMI_NETFN_APP_REQUEST;
2181 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2184 return i_ipmi_request(NULL,
2186 (struct ipmi_addr *) &si,
/* Source our own channel-0 slave address and LUN. */
2193 intf->channels[0].address,
2194 intf->channels[0].lun,
/*
 * null_user_handler for the Get Device GUID response issued by
 * get_guid().  Copies the 16-byte GUID into intf->bmc->guid and sets
 * guid_set to 1 on success, or clears guid_set on error / short
 * response, then wakes get_guid() which waits on intf->waitq.
 */
2199 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
/* Ignore anything that isn't the GUID response we asked for. */
2201 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2202 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2203 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2207 if (msg->msg.data[0] != 0) {
2208 /* Error from getting the GUID, the BMC doesn't have one. */
2209 intf->bmc->guid_set = 0;
/* 17 = completion code + 16 GUID bytes. */
2213 if (msg->msg.data_len < 17) {
2214 intf->bmc->guid_set = 0;
2215 printk(KERN_WARNING PFX
2216 "guid_handler: The GUID response from the BMC was too"
2217 " short, it was %d but should have been 17. Assuming"
2218 " GUID is not available.\n",
/* Skip the completion code byte; the GUID follows it. */
2223 memcpy(intf->bmc->guid, msg->msg.data, 16);
2224 intf->bmc->guid_set = 1;
2226 wake_up(&intf->waitq);
/*
 * Synchronously fetch the BMC's GUID: install guid_handler as the
 * null-user handler, send the command, then sleep until the handler
 * moves guid_set off its "in progress" value of 2 (to 1 on success or
 * 0 on failure).
 */
2230 get_guid(ipmi_smi_t intf)
/* 2 == "request outstanding"; the handler sets it to 0 or 1. */
2234 intf->bmc->guid_set = 0x2;
2235 intf->null_user_handler = guid_handler;
2236 rv = send_guid_cmd(intf, 0);
2238 /* Send failed, no GUID available. */
2239 intf->bmc->guid_set = 0;
2240 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2241 intf->null_user_handler = NULL;
/*
 * Issue a Get Channel Info command for channel 'chan' over the system
 * interface with no user; the response is routed to the null-user
 * handler (channel_handler) which drives the scan of all channels.
 */
2245 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2247 struct kernel_ipmi_msg msg;
2248 unsigned char data[1];
2249 struct ipmi_system_interface_addr si;
2251 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2252 si.channel = IPMI_BMC_CHANNEL;
2255 msg.netfn = IPMI_NETFN_APP_REQUEST;
2256 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2260 return i_ipmi_request(NULL,
2262 (struct ipmi_addr *) &si,
2269 intf->channels[0].address,
2270 intf->channels[0].lun,
/*
 * null_user_handler driving the channel scan started by
 * ipmi_register_smi(): records medium/protocol for the current channel
 * from each Get Channel Info response, then requests the next channel.
 * Completion (or any terminal error) is signaled by setting
 * curr_channel to IPMI_MAX_CHANNELS and waking intf->waitq.
 */
2275 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2280 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2281 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2282 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2284 /* It's the one we want */
2285 if (msg->msg.data[0] != 0) {
2286 /* Got an error from the channel, just go on. */
2288 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2289 /* If the MC does not support this
2290 command, that is legal. We just
2291 assume it has one IPMB at channel
/* Fallback: a single IPMB channel at index 0. */
2293 intf->channels[0].medium
2294 = IPMI_CHANNEL_MEDIUM_IPMB;
2295 intf->channels[0].protocol
2296 = IPMI_CHANNEL_PROTOCOL_IPMB;
/* Terminate the scan and release the waiter. */
2299 intf->curr_channel = IPMI_MAX_CHANNELS;
2300 wake_up(&intf->waitq);
2305 if (msg->msg.data_len < 4) {
2306 /* Message not big enough, just go on. */
/* data[2]/data[3] carry the medium and protocol fields. */
2309 chan = intf->curr_channel;
2310 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2311 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2314 intf->curr_channel++;
2315 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2316 wake_up(&intf->waitq);
2318 rv = send_channel_info_cmd(intf, intf->curr_channel);
2321 /* Got an error somehow, just give up. */
2322 intf->curr_channel = IPMI_MAX_CHANNELS;
2323 wake_up(&intf->waitq);
2325 printk(KERN_WARNING PFX
2326 "Error sending channel information: %d\n",
/*
 * Register a new low-level SMI interface with the message handler:
 * allocate and initialize the ipmi_smi structure, reserve a slot in
 * ipmi_interfaces[], start the lower layer, scan the channels (IPMI
 * >= 1.5) or assume one IPMB channel (older), create proc entries and
 * the BMC device, then publish the interface and notify watchers.
 */
2334 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2336 struct ipmi_device_id *device_id,
2337 struct device *si_dev,
2338 unsigned char slave_addr)
2343 unsigned long flags;
2347 version_major = ipmi_version_major(device_id);
2348 version_minor = ipmi_version_minor(device_id);
2350 /* Make sure the driver is actually initialized, this handles
2351 problems with initialization order. */
2353 rv = ipmi_init_msghandler();
2356 /* The init code doesn't return an error if it was turned
2357 off, but it won't initialize. Check that. */
/* NOTE(review): kmalloc+memset could be kzalloc, as used just below. */
2362 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2365 memset(intf, 0, sizeof(*intf));
2366 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2371 intf->intf_num = -1;
2372 kref_init(&intf->refcount);
2373 intf->bmc->id = *device_id;
2374 intf->si_dev = si_dev;
/* Default every channel to the BMC slave address and LUN 2. */
2375 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2376 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2377 intf->channels[j].lun = 2;
2379 if (slave_addr != 0)
2380 intf->channels[0].address = slave_addr;
2381 INIT_LIST_HEAD(&intf->users);
2382 intf->handlers = handlers;
2383 intf->send_info = send_info;
2384 spin_lock_init(&intf->seq_lock);
2385 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2386 intf->seq_table[j].inuse = 0;
2387 intf->seq_table[j].seqid = 0;
2390 #ifdef CONFIG_PROC_FS
2391 spin_lock_init(&intf->proc_entry_lock);
2393 spin_lock_init(&intf->waiting_msgs_lock);
2394 INIT_LIST_HEAD(&intf->waiting_msgs);
2395 spin_lock_init(&intf->events_lock);
2396 INIT_LIST_HEAD(&intf->waiting_events);
2397 intf->waiting_events_count = 0;
2398 mutex_init(&intf->cmd_rcvrs_mutex);
2399 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2400 init_waitqueue_head(&intf->waitq);
2402 spin_lock_init(&intf->counter_lock);
2403 intf->proc_dir = NULL;
/* Claim a free slot and mark it in-use before dropping the lock. */
2406 spin_lock_irqsave(&interfaces_lock, flags);
2407 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2408 if (ipmi_interfaces[i] == NULL) {
2410 /* Reserve the entry till we are done. */
2411 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2416 spin_unlock_irqrestore(&interfaces_lock, flags);
2420 rv = handlers->start_processing(send_info, intf);
/* Channel scanning requires IPMI 1.5 or later. */
2426 if ((version_major > 1)
2427 || ((version_major == 1) && (version_minor >= 5)))
2429 /* Start scanning the channels to see what is
2431 intf->null_user_handler = channel_handler;
2432 intf->curr_channel = 0;
2433 rv = send_channel_info_cmd(intf, 0);
2437 /* Wait for the channel info to be read. */
2438 wait_event(intf->waitq,
2439 intf->curr_channel >= IPMI_MAX_CHANNELS);
2440 intf->null_user_handler = NULL;
2442 /* Assume a single IPMB channel at zero. */
2443 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2444 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2448 rv = add_proc_entries(intf, i);
2450 rv = ipmi_bmc_register(intf);
/* Error unwind: drop proc entries, our ref, and the reserved slot. */
2455 remove_proc_entries(intf);
2456 kref_put(&intf->refcount, intf_free);
2457 if (i < MAX_IPMI_INTERFACES) {
2458 spin_lock_irqsave(&interfaces_lock, flags);
2459 ipmi_interfaces[i] = NULL;
2460 spin_unlock_irqrestore(&interfaces_lock, flags);
/* Success: publish the interface and tell the watchers. */
2463 spin_lock_irqsave(&interfaces_lock, flags);
2464 ipmi_interfaces[i] = intf;
2465 spin_unlock_irqrestore(&interfaces_lock, flags);
2466 call_smi_watchers(i, intf->si_dev);
/*
 * Remove a registered SMI interface: unregister its BMC device, mark
 * its ipmi_interfaces[] slot reserved (so the index isn't reused while
 * watchers are still being notified), tear down proc entries, notify
 * the watchers of removal, then free the slot and drop the reference.
 */
2472 int ipmi_unregister_smi(ipmi_smi_t intf)
2475 struct ipmi_smi_watcher *w;
2476 unsigned long flags;
2478 ipmi_bmc_unregister(intf);
2480 spin_lock_irqsave(&interfaces_lock, flags);
2481 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2482 if (ipmi_interfaces[i] == intf) {
2483 /* Set the interface number reserved until we
2485 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2486 intf->intf_num = -1;
2490 spin_unlock_irqrestore(&interfaces_lock,flags);
/* Not found in the table — nothing more to do. */
2492 if (i == MAX_IPMI_INTERFACES)
2495 remove_proc_entries(intf);
2497 /* Call all the watcher interfaces to tell them that
2498 an interface is gone. */
2499 down_read(&smi_watchers_sem);
2500 list_for_each_entry(w, &smi_watchers, link)
2502 up_read(&smi_watchers_sem);
2504 /* Allow the entry to be reused now. */
2505 spin_lock_irqsave(&interfaces_lock, flags);
2506 ipmi_interfaces[i] = NULL;
2507 spin_unlock_irqrestore(&interfaces_lock,flags);
2509 kref_put(&intf->refcount, intf_free);
/*
 * Handle a Get Message response carrying an IPMB response from a
 * remote entity.  Validates the length, reconstructs the IPMB source
 * address from the raw bytes, matches the message to an outstanding
 * request via its sequence number, and delivers the payload to the
 * originating user.  Counter updates track invalid / unmatched /
 * handled responses.
 */
2513 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2514 struct ipmi_smi_msg *msg)
2516 struct ipmi_ipmb_addr ipmb_addr;
2517 struct ipmi_recv_msg *recv_msg;
2518 unsigned long flags;
2521 /* This is 11, not 10, because the response must contain a
2522 * completion code. */
2523 if (msg->rsp_size < 11) {
2524 /* Message not big enough, just ignore it. */
2525 spin_lock_irqsave(&intf->counter_lock, flags);
2526 intf->invalid_ipmb_responses++;
2527 spin_unlock_irqrestore(&intf->counter_lock, flags);
2531 if (msg->rsp[2] != 0) {
2532 /* An error getting the response, just ignore it. */
/* Rebuild the remote sender's IPMB address from the raw response. */
2536 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2537 ipmb_addr.slave_addr = msg->rsp[6];
2538 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2539 ipmb_addr.lun = msg->rsp[7] & 3;
2541 /* It's a response from a remote entity. Look up the sequence
2542 number and handle the response. */
2543 if (intf_find_seq(intf,
/* Match on the request netfn: clear the response bit (low bit). */
2547 (msg->rsp[4] >> 2) & (~1),
2548 (struct ipmi_addr *) &(ipmb_addr),
2551 /* We were unable to find the sequence number,
2552 so just nuke the message. */
2553 spin_lock_irqsave(&intf->counter_lock, flags);
2554 intf->unhandled_ipmb_responses++;
2555 spin_unlock_irqrestore(&intf->counter_lock, flags);
2559 memcpy(recv_msg->msg_data,
2562 /* THe other fields matched, so no need to set them, except
2563 for netfn, which needs to be the response that was
2564 returned, not the request value. */
2565 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2566 recv_msg->msg.data = recv_msg->msg_data;
/* Strip the 10 header/trailer bytes (including final checksum). */
2567 recv_msg->msg.data_len = msg->rsp_size - 10;
2568 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2569 spin_lock_irqsave(&intf->counter_lock, flags);
2570 intf->handled_ipmb_responses++;
2571 spin_unlock_irqrestore(&intf->counter_lock, flags);
2572 deliver_response(recv_msg);
/*
 * Handle a Get Message response carrying an IPMB *command* from a
 * remote entity.  Finds the registered command receiver for the
 * (netfn, cmd, channel) triple; if none exists, builds and sends an
 * "invalid command" IPMB error response back to the requester reusing
 * the SMI message buffer.  Otherwise copies the command into a fresh
 * recv_msg and delivers it to the registered user.  Returns 0 when the
 * caller may free/queue msg, -1 when msg was consumed here.
 */
2577 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2578 struct ipmi_smi_msg *msg)
2580 struct cmd_rcvr *rcvr;
2582 unsigned char netfn;
2585 ipmi_user_t user = NULL;
2586 struct ipmi_ipmb_addr *ipmb_addr;
2587 struct ipmi_recv_msg *recv_msg;
2588 unsigned long flags;
2590 if (msg->rsp_size < 10) {
2591 /* Message not big enough, just ignore it. */
2592 spin_lock_irqsave(&intf->counter_lock, flags);
2593 intf->invalid_commands++;
2594 spin_unlock_irqrestore(&intf->counter_lock, flags);
2598 if (msg->rsp[2] != 0) {
2599 /* An error getting the response, just ignore it. */
/* Decode routing fields: netfn from byte 4, channel from byte 3. */
2603 netfn = msg->rsp[4] >> 2;
2605 chan = msg->rsp[3] & 0xf;
2608 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a ref on the user across delivery. */
2611 kref_get(&user->refcount);
2617 /* We didn't find a user, deliver an error response. */
2618 spin_lock_irqsave(&intf->counter_lock, flags);
2619 intf->unhandled_commands++;
2620 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Construct a Send Message wrapping an IPMB "invalid command"
   completion back to the original requester. */
2622 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2623 msg->data[1] = IPMI_SEND_MSG_CMD;
2624 msg->data[2] = msg->rsp[3];
2625 msg->data[3] = msg->rsp[6];
/* Response netfn = request netfn + 1, requester's LUN preserved. */
2626 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2627 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2628 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2630 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2631 msg->data[8] = msg->rsp[8]; /* cmd */
2632 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2633 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2634 msg->data_size = 11;
2639 printk("Invalid command:");
2640 for (m = 0; m < msg->data_size; m++)
2641 printk(" %2.2x", msg->data[m]);
2645 intf->handlers->sender(intf->send_info, msg, 0);
2647 rv = -1; /* We used the message, so return the value that
2648 causes it to not be freed or queued. */
2650 /* Deliver the message to the user. */
2651 spin_lock_irqsave(&intf->counter_lock, flags);
2652 intf->handled_commands++;
2653 spin_unlock_irqrestore(&intf->counter_lock, flags);
2655 recv_msg = ipmi_alloc_recv_msg();
2657 /* We couldn't allocate memory for the
2658 message, so requeue it for handling
2661 kref_put(&user->refcount, free_user);
2663 /* Extract the source address from the data. */
2664 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2665 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2666 ipmb_addr->slave_addr = msg->rsp[6];
2667 ipmb_addr->lun = msg->rsp[7] & 3;
2668 ipmb_addr->channel = msg->rsp[3] & 0xf;
2670 /* Extract the rest of the message information
2671 from the IPMB header.*/
2672 recv_msg->user = user;
2673 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2674 recv_msg->msgid = msg->rsp[7] >> 2;
2675 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2676 recv_msg->msg.cmd = msg->rsp[8];
2677 recv_msg->msg.data = recv_msg->msg_data;
2679 /* We chop off 10, not 9 bytes because the checksum
2680 at the end also needs to be removed. */
2681 recv_msg->msg.data_len = msg->rsp_size - 10;
2682 memcpy(recv_msg->msg_data,
2684 msg->rsp_size - 10);
2685 deliver_response(recv_msg);
/*
 * Handle the response half of a LAN-bridged message pulled from the BMC
 * receive queue: rebuild the remote sender's LAN address from the response
 * bytes, look up the matching pending request with intf_find_seq(), copy
 * the payload into the waiting recv_msg and deliver it to the user.
 * NOTE(review): this excerpt elides several lines (return paths and some
 * intf_find_seq() arguments), so flow between visible statements is partly
 * inferred - confirm against the full file.
 */
2692 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2693 struct ipmi_smi_msg *msg)
2695 struct ipmi_lan_addr lan_addr;
2696 struct ipmi_recv_msg *recv_msg;
2697 unsigned long flags;
2700 /* This is 13, not 12, because the response must contain a
2701 * completion code. */
2702 if (msg->rsp_size < 13) {
2703 /* Message not big enough, just ignore it. */
2704 spin_lock_irqsave(&intf->counter_lock, flags);
2705 intf->invalid_lan_responses++;
2706 spin_unlock_irqrestore(&intf->counter_lock, flags);
2710 if (msg->rsp[2] != 0) {
2711 /* An error getting the response, just ignore it. */
/* Decode the bridged sender's address from the Get Message response. */
2715 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2716 lan_addr.session_handle = msg->rsp[4];
2717 lan_addr.remote_SWID = msg->rsp[8];
2718 lan_addr.local_SWID = msg->rsp[5];
2719 lan_addr.channel = msg->rsp[3] & 0x0f;
2720 lan_addr.privilege = msg->rsp[3] >> 4;
2721 lan_addr.lun = msg->rsp[9] & 3;
2723 /* It's a response from a remote entity. Look up the sequence
2724 number and handle the response. */
/* (msg->rsp[6] >> 2) & ~1 masks off the response bit to recover the
   request netfn used when the sequence entry was created. */
2725 if (intf_find_seq(intf,
2729 (msg->rsp[6] >> 2) & (~1),
2730 (struct ipmi_addr *) &(lan_addr),
2733 /* We were unable to find the sequence number,
2734 so just nuke the message. */
2735 spin_lock_irqsave(&intf->counter_lock, flags);
2736 intf->unhandled_lan_responses++;
2737 spin_unlock_irqrestore(&intf->counter_lock, flags);
2741 memcpy(recv_msg->msg_data,
2743 msg->rsp_size - 11);
2744 /* The other fields matched, so no need to set them, except
2745 for netfn, which needs to be the response that was
2746 returned, not the request value. */
2747 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2748 recv_msg->msg.data = recv_msg->msg_data;
2749 recv_msg->msg.data_len = msg->rsp_size - 12;
2750 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2751 spin_lock_irqsave(&intf->counter_lock, flags);
2752 intf->handled_lan_responses++;
2753 spin_unlock_irqrestore(&intf->counter_lock, flags);
2754 deliver_response(recv_msg);
/*
 * Handle a LAN-bridged command addressed to us, read from the BMC receive
 * queue.  Find the registered command receiver for (netfn, cmd, chan); if
 * one exists, build a recv_msg carrying the sender's LAN address and the
 * command payload and deliver it, otherwise count it and let it be freed.
 * NOTE(review): lines are elided in this excerpt (cmd extraction, error
 * returns, allocation-failure requeue path) - confirm against full file.
 */
2759 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2760 struct ipmi_smi_msg *msg)
2762 struct cmd_rcvr *rcvr;
2764 unsigned char netfn;
2767 ipmi_user_t user = NULL;
2768 struct ipmi_lan_addr *lan_addr;
2769 struct ipmi_recv_msg *recv_msg;
2770 unsigned long flags;
2772 if (msg->rsp_size < 12) {
2773 /* Message not big enough, just ignore it. */
2774 spin_lock_irqsave(&intf->counter_lock, flags);
2775 intf->invalid_commands++;
2776 spin_unlock_irqrestore(&intf->counter_lock, flags);
2780 if (msg->rsp[2] != 0) {
2781 /* An error getting the response, just ignore it. */
2785 netfn = msg->rsp[6] >> 2;
2787 chan = msg->rsp[3] & 0xf;
2790 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference on the user while we deliver to it. */
2793 kref_get(&user->refcount);
2799 /* We didn't find a user, just give up. */
2800 spin_lock_irqsave(&intf->counter_lock, flags);
2801 intf->unhandled_commands++;
2802 spin_unlock_irqrestore(&intf->counter_lock, flags);
2804 rv = 0; /* Don't do anything with these messages, just
2805 allow them to be freed. */
2807 /* Deliver the message to the user. */
2808 spin_lock_irqsave(&intf->counter_lock, flags);
2809 intf->handled_commands++;
2810 spin_unlock_irqrestore(&intf->counter_lock, flags);
2812 recv_msg = ipmi_alloc_recv_msg();
2814 /* We couldn't allocate memory for the
2815 message, so requeue it for handling
/* Allocation failed: drop the user reference taken above. */
2818 kref_put(&user->refcount, free_user);
2820 /* Extract the source address from the data. */
2821 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2822 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2823 lan_addr->session_handle = msg->rsp[4];
2824 lan_addr->remote_SWID = msg->rsp[8];
2825 lan_addr->local_SWID = msg->rsp[5];
2826 lan_addr->lun = msg->rsp[9] & 3;
2827 lan_addr->channel = msg->rsp[3] & 0xf;
2828 lan_addr->privilege = msg->rsp[3] >> 4;
2830 /* Extract the rest of the message information
2831 from the IPMB header.*/
2832 recv_msg->user = user;
2833 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2834 recv_msg->msgid = msg->rsp[9] >> 2;
2835 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2836 recv_msg->msg.cmd = msg->rsp[10];
2837 recv_msg->msg.data = recv_msg->msg_data;
2839 /* We chop off 12, not 11 bytes because the checksum
2840 at the end also needs to be removed. */
2841 recv_msg->msg.data_len = msg->rsp_size - 12;
2842 memcpy(recv_msg->msg_data,
2844 msg->rsp_size - 12);
2845 deliver_response(recv_msg);
/*
 * Fill a recv_msg with an asynchronous event read from the BMC: the
 * address is the system interface (BMC channel), the netfn/cmd come from
 * the raw response header, and the event payload (everything past the
 * completion code, i.e. rsp[3..]) is copied into msg_data.
 */
2852 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2853 struct ipmi_smi_msg *msg)
2855 struct ipmi_system_interface_addr *smi_addr;
2857 recv_msg->msgid = 0;
2858 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2859 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2860 smi_addr->channel = IPMI_BMC_CHANNEL;
2861 smi_addr->lun = msg->rsp[0] & 3;
2862 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2863 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2864 recv_msg->msg.cmd = msg->rsp[1];
/* Skip netfn/lun, cmd and completion-code bytes; copy the event body. */
2865 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2866 recv_msg->msg.data = recv_msg->msg_data;
2867 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle a Read Event Message Buffer response.  Under events_lock, copy
 * the event once per user that has gets_events set and deliver to all of
 * them; if no user wants events, park one copy on intf->waiting_events
 * (bounded by MAX_EVENTS_IN_QUEUE) for a future ipmi_set_gets_events().
 * On allocation failure the already-built copies are freed and the SMI
 * message is left to be requeued.
 * NOTE(review): some lines are elided here (user declaration, requeue
 * return, loop closers) - confirm control flow against the full file.
 */
2870 static int handle_read_event_rsp(ipmi_smi_t intf,
2871 struct ipmi_smi_msg *msg)
2873 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2874 struct list_head msgs;
2877 int deliver_count = 0;
2878 unsigned long flags;
2880 if (msg->rsp_size < 19) {
2881 /* Message is too small to be an IPMB event. */
2882 spin_lock_irqsave(&intf->counter_lock, flags);
2883 intf->invalid_events++;
2884 spin_unlock_irqrestore(&intf->counter_lock, flags);
2888 if (msg->rsp[2] != 0) {
2889 /* An error getting the event, just ignore it. */
2893 INIT_LIST_HEAD(&msgs);
2895 spin_lock_irqsave(&intf->events_lock, flags);
2897 spin_lock(&intf->counter_lock);
2899 spin_unlock(&intf->counter_lock);
2901 /* Allocate and fill in one message for every user that is getting
2904 list_for_each_entry_rcu(user, &intf->users, link) {
2905 if (!user->gets_events)
2908 recv_msg = ipmi_alloc_recv_msg();
/* Out of memory: unwind the copies already queued for delivery. */
2911 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2913 list_del(&recv_msg->link);
2914 ipmi_free_recv_msg(recv_msg);
2916 /* We couldn't allocate memory for the
2917 message, so requeue it for handling
2925 copy_event_into_recv_msg(recv_msg, msg);
2926 recv_msg->user = user;
2927 kref_get(&user->refcount);
2928 list_add_tail(&(recv_msg->link), &msgs);
2932 if (deliver_count) {
2933 /* Now deliver all the messages. */
2934 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2935 list_del(&recv_msg->link);
2936 deliver_response(recv_msg);
2938 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2939 /* No one to receive the message, put it in queue if there's
2940 not already too many things in the queue. */
2941 recv_msg = ipmi_alloc_recv_msg();
2943 /* We couldn't allocate memory for the
2944 message, so requeue it for handling
2950 copy_event_into_recv_msg(recv_msg, msg);
2951 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2952 intf->waiting_events_count++;
2954 /* There's too many things in the queue, discard this
2956 printk(KERN_WARNING PFX "Event queue full, discarding an"
2957 " incoming event\n");
2961 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC to a command we sent on the system
 * interface.  The originating recv_msg rides along in msg->user_data; if
 * its user has gone away, count and free it, otherwise fill in the system
 * interface address, netfn/cmd and payload (minus the 2 header bytes) and
 * deliver it.
 * NOTE(review): "vender" in the warning text should read "vendor"; the
 * string is runtime output and is left untouched in this comment-only pass.
 */
2966 static int handle_bmc_rsp(ipmi_smi_t intf,
2967 struct ipmi_smi_msg *msg)
2969 struct ipmi_recv_msg *recv_msg;
2970 unsigned long flags;
2971 struct ipmi_user *user;
2973 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2974 if (recv_msg == NULL)
2976 printk(KERN_WARNING"IPMI message received with no owner. This\n"
2977 "could be because of a malformed message, or\n"
2978 "because of a hardware error. Contact your\n"
2979 "hardware vender for assistance\n");
2983 user = recv_msg->user;
2984 /* Make sure the user still exists. */
2985 if (user && !user->valid) {
2986 /* The user for the message went away, so give up. */
2987 spin_lock_irqsave(&intf->counter_lock, flags);
2988 intf->unhandled_local_responses++;
2989 spin_unlock_irqrestore(&intf->counter_lock, flags);
2990 ipmi_free_recv_msg(recv_msg);
2992 struct ipmi_system_interface_addr *smi_addr;
2994 spin_lock_irqsave(&intf->counter_lock, flags);
2995 intf->handled_local_responses++;
2996 spin_unlock_irqrestore(&intf->counter_lock, flags);
2997 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2998 recv_msg->msgid = msg->msgid;
2999 smi_addr = ((struct ipmi_system_interface_addr *)
3001 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3002 smi_addr->channel = IPMI_BMC_CHANNEL;
3003 smi_addr->lun = msg->rsp[0] & 3;
3004 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3005 recv_msg->msg.cmd = msg->rsp[1];
/* Strip the 2-byte netfn/cmd header; keep completion code + data. */
3006 memcpy(recv_msg->msg_data,
3009 recv_msg->msg.data = recv_msg->msg_data;
3010 recv_msg->msg.data_len = msg->rsp_size - 2;
3011 deliver_response(recv_msg);
3017 /* Handle a new message. Return 1 if the message should be requeued,
3018 0 if the message should be freed, or -1 if the message should not
3019 be freed or requeued. */
/*
 * Central dispatch for a message coming up from the SMI layer.  Sanity-
 * checks the response against the request it answers, then routes by
 * netfn/cmd: Send Message responses go back to the requester, Get Message
 * responses are demultiplexed per channel medium (IPMB vs. LAN, command
 * vs. response), Read Event Message Buffer responses become async events,
 * and anything else is treated as a local BMC response.
 * NOTE(review): the printk text "to small" should read "too small"; it is
 * runtime output and cannot be changed in this comment-only pass.
 */
3020 static int handle_new_recv_msg(ipmi_smi_t intf,
3021 struct ipmi_smi_msg *msg)
3029 for (m = 0; m < msg->rsp_size; m++)
3030 printk(" %2.2x", msg->rsp[m]);
3033 if (msg->rsp_size < 2) {
3034 /* Message is too small to be correct. */
3035 printk(KERN_WARNING PFX "BMC returned to small a message"
3036 " for netfn %x cmd %x, got %d bytes\n",
3037 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3039 /* Generate an error response for the message. */
3040 msg->rsp[0] = msg->data[0] | (1 << 2);
3041 msg->rsp[1] = msg->data[1];
3042 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
/* Response netfn must be request netfn with the response bit set, and
   the command byte must echo the request. */
3044 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3045 || (msg->rsp[1] != msg->data[1])) /* Command */
3047 /* The response is not even marginally correct. */
3048 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3049 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3050 (msg->data[0] >> 2) | 1, msg->data[1],
3051 msg->rsp[0] >> 2, msg->rsp[1]);
3053 /* Generate an error response for the message. */
3054 msg->rsp[0] = msg->data[0] | (1 << 2);
3055 msg->rsp[1] = msg->data[1];
3056 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3060 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3061 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3062 && (msg->user_data != NULL))
3064 /* It's a response to a response we sent. For this we
3065 deliver a send message response to the user. */
3066 struct ipmi_recv_msg *recv_msg = msg->user_data;
3069 if (msg->rsp_size < 2)
3070 /* Message is too small to be correct. */
3073 chan = msg->data[2] & 0x0f;
3074 if (chan >= IPMI_MAX_CHANNELS)
3075 /* Invalid channel number */
3081 /* Make sure the user still exists. */
3082 if (!recv_msg->user || !recv_msg->user->valid)
3085 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3086 recv_msg->msg.data = recv_msg->msg_data;
3087 recv_msg->msg.data_len = 1;
/* Deliver just the completion code of the send. */
3088 recv_msg->msg_data[0] = msg->rsp[2];
3089 deliver_response(recv_msg);
3090 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3091 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3093 /* It's from the receive queue. */
3094 chan = msg->rsp[3] & 0xf;
3095 if (chan >= IPMI_MAX_CHANNELS) {
3096 /* Invalid channel number */
3101 switch (intf->channels[chan].medium) {
3102 case IPMI_CHANNEL_MEDIUM_IPMB:
/* Bit 2 of the netfn/lun byte distinguishes response from command. */
3103 if (msg->rsp[4] & 0x04) {
3104 /* It's a response, so find the
3105 requesting message and send it up. */
3106 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3108 /* It's a command to the SMS from some other
3109 entity. Handle that. */
3110 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3114 case IPMI_CHANNEL_MEDIUM_8023LAN:
3115 case IPMI_CHANNEL_MEDIUM_ASYNC:
3116 if (msg->rsp[6] & 0x04) {
3117 /* It's a response, so find the
3118 requesting message and send it up. */
3119 requeue = handle_lan_get_msg_rsp(intf, msg);
3121 /* It's a command to the SMS from some other
3122 entity. Handle that. */
3123 requeue = handle_lan_get_msg_cmd(intf, msg);
3128 /* We don't handle the channel type, so just
3129 * free the message. */
3133 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3134 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3136 /* It's an asynchronous event. */
3137 requeue = handle_read_event_rsp(intf, msg);
3139 /* It's a response from the local BMC. */
3140 requeue = handle_bmc_rsp(intf, msg);
3147 /* Handle a new message from the lower layer. */
/*
 * Entry point the SMI driver calls for every completed message.  For the
 * local response to a Send Message we issued, either report a hard send
 * error through intf_err_seq() or start the retry timer, then free the
 * SMI message.  Otherwise hand the message to handle_new_recv_msg(); to
 * preserve ordering it is queued on intf->waiting_msgs when that list is
 * non-empty or when handling must be deferred (rv == 1).
 */
3148 void ipmi_smi_msg_received(ipmi_smi_t intf,
3149 struct ipmi_smi_msg *msg)
3151 unsigned long flags;
3155 if ((msg->data_size >= 2)
3156 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3157 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3158 && (msg->user_data == NULL))
3160 /* This is the local response to a command send, start
3161 the timer for these. The user_data will not be
3162 NULL if this is a response send, and we will let
3163 response sends just go through. */
3165 /* Check for errors, if we get certain errors (ones
3166 that mean basically we can try again later), we
3167 ignore them and start the timer. Otherwise we
3168 report the error immediately. */
3169 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3170 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3171 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
3173 int chan = msg->rsp[3] & 0xf;
3175 /* Got an error sending the message, handle it. */
3176 spin_lock_irqsave(&intf->counter_lock, flags);
3177 if (chan >= IPMI_MAX_CHANNELS)
3178 ; /* This shouldn't happen */
3179 else if ((intf->channels[chan].medium
3180 == IPMI_CHANNEL_MEDIUM_8023LAN)
3181 || (intf->channels[chan].medium
3182 == IPMI_CHANNEL_MEDIUM_ASYNC))
3183 intf->sent_lan_command_errs++;
3185 intf->sent_ipmb_command_errs++;
3186 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Fail the pending sequence entry with the completion code. */
3187 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3189 /* The message was sent, start the timer. */
3190 intf_start_seq_timer(intf, msg->msgid);
3193 ipmi_free_smi_msg(msg);
3197 /* To preserve message order, if the list is not empty, we
3198 tack this message onto the end of the list. */
3199 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3200 if (!list_empty(&intf->waiting_msgs)) {
3201 list_add_tail(&msg->link, &intf->waiting_msgs);
3202 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3205 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3207 rv = handle_new_recv_msg(intf, msg);
/* rv == 1: could not be handled now; rv == 0: handled, free it;
   rv == -1: handler consumed the message (see handle_new_recv_msg). */
3209 /* Could not handle the message now, just add it to a
3210 list to handle later. */
3211 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3212 list_add_tail(&msg->link, &intf->waiting_msgs);
3213 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3214 } else if (rv == 0) {
3215 ipmi_free_smi_msg(msg);
/*
 * Called by the SMI driver on a watchdog pre-timeout: notify every
 * registered user that supplied an ipmi_watchdog_pretimeout handler.
 * Iterates the RCU-protected user list.
 */
3222 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3227 list_for_each_entry_rcu(user, &intf->users, link) {
3228 if (!user->handler->ipmi_watchdog_pretimeout)
3231 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * A request exhausted its retries: synthesize a one-byte response whose
 * completion code is IPMI_TIMEOUT_COMPLETION_CODE and deliver it to the
 * waiting user so the requester is never left hanging.
 */
3237 handle_msg_timeout(struct ipmi_recv_msg *msg)
3239 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3240 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3241 msg->msg.netfn |= 1; /* Convert to a response. */
3242 msg->msg.data_len = 1;
3243 msg->msg.data = msg->msg_data;
3244 deliver_response(msg);
/*
 * Build a fresh SMI message from a recv_msg so a timed-out request can be
 * retransmitted; seq/seqid are re-encoded into the msgid.  Returns NULL
 * (implicitly, on allocation failure - the failure line is elided in this
 * excerpt) since the retry machinery tolerates a skipped attempt.
 */
3247 static struct ipmi_smi_msg *
3248 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3249 unsigned char seq, long seqid)
3251 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3253 /* If we can't allocate the message, then just return, we
3254 get 4 retries, so this should be ok. */
3257 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3258 smi_msg->data_size = recv_msg->msg.data_len;
3259 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3265 for (m = 0; m < smi_msg->data_size; m++)
3266 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table slot by timeout_period (called with seq_lock
 * held; *flags belongs to that lock).  If the entry expires with no
 * retries left, move its recv_msg onto the caller's timeouts list and
 * bump the matching timed-out counter; otherwise rebuild the SMI message
 * and retransmit it, dropping seq_lock around the driver's sender() call
 * since it may sleep or re-enter the message handler.
 */
3273 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3274 struct list_head *timeouts, long timeout_period,
3275 int slot, unsigned long *flags)
3277 struct ipmi_recv_msg *msg;
3282 ent->timeout -= timeout_period;
3283 if (ent->timeout > 0)
3286 if (ent->retries_left == 0) {
3287 /* The message has used all its retries. */
3289 msg = ent->recv_msg;
3290 list_add_tail(&msg->link, timeouts);
3291 spin_lock(&intf->counter_lock);
3293 intf->timed_out_ipmb_broadcasts++;
3294 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3295 intf->timed_out_lan_commands++;
3297 intf->timed_out_ipmb_commands++;
3298 spin_unlock(&intf->counter_lock);
3300 struct ipmi_smi_msg *smi_msg;
3301 /* More retries, send again. */
3303 /* Start with the max timer, set to normal
3304 timer after the message is sent. */
3305 ent->timeout = MAX_MSG_TIMEOUT;
3306 ent->retries_left--;
3307 spin_lock(&intf->counter_lock);
3308 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3309 intf->retransmitted_lan_commands++;
3311 intf->retransmitted_ipmb_commands++;
3312 spin_unlock(&intf->counter_lock);
3314 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop seq_lock while calling into the SMI driver. */
3319 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3320 /* Send the new message. We send with a zero
3321 * priority. It timed out, I doubt time is
3322 * that critical now, and high priority
3323 * messages are really only for messages to the
3324 * local MC, which don't get resent. */
3325 intf->handlers->sender(intf->send_info,
3327 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic tick work (driven by ipmi_timeout): for every registered
 * interface, first drain whatever deferred messages on waiting_msgs can
 * now be handled (stopping at the first that can't, to keep ordering),
 * then age every sequence-table slot via check_msg_timeout() and finally
 * deliver timeout responses for the entries that expired.  Each interface
 * is pinned with a kref while interfaces_lock is dropped around the work.
 */
3331 static void ipmi_timeout_handler(long timeout_period)
3334 struct list_head timeouts;
3335 struct ipmi_recv_msg *msg, *msg2;
3336 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3337 unsigned long flags;
3340 INIT_LIST_HEAD(&timeouts);
3342 spin_lock(&interfaces_lock);
3343 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3344 intf = ipmi_interfaces[i];
3345 if (IPMI_INVALID_INTERFACE(intf))
3347 kref_get(&intf->refcount);
3348 spin_unlock(&interfaces_lock);
3350 /* See if any waiting messages need to be processed. */
3351 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3352 list_for_each_entry_safe(smi_msg, smi_msg2,
3353 &intf->waiting_msgs, link) {
3354 if (!handle_new_recv_msg(intf, smi_msg)) {
3355 list_del(&smi_msg->link);
3356 ipmi_free_smi_msg(smi_msg);
3358 /* To preserve message order, quit if we
3359 can't handle a message. */
3363 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3365 /* Go through the seq table and find any messages that
3366 have timed out, putting them in the timeouts
3368 spin_lock_irqsave(&intf->seq_lock, flags);
3369 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
3370 check_msg_timeout(intf, &(intf->seq_table[j]),
3371 &timeouts, timeout_period, j,
3373 spin_unlock_irqrestore(&intf->seq_lock, flags);
3375 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3376 handle_msg_timeout(msg);
3378 kref_put(&intf->refcount, intf_free);
3379 spin_lock(&interfaces_lock);
3381 spin_unlock(&interfaces_lock);
/*
 * Ask every valid interface's SMI driver to poll the BMC for pending
 * events (called from the timer roughly once per second - see
 * IPMI_REQUEST_EV_TIME below).
 */
3384 static void ipmi_request_event(void)
3389 spin_lock(&interfaces_lock);
3390 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3391 intf = ipmi_interfaces[i];
3392 if (IPMI_INVALID_INTERFACE(intf))
3395 intf->handlers->request_events(intf->send_info);
3397 spin_unlock(&interfaces_lock);
3400 static struct timer_list ipmi_timer;
3402 /* Call every ~100 ms. */
3403 #define IPMI_TIMEOUT_TIME 100
3405 /* How many jiffies does it take to get to the timeout time. */
3406 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3408 /* Request events from the queue every second (this is the number of
3409 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3410 future, IPMI will add a way to know immediately if an event is in
3411 the queue and this silliness can go away. */
3412 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3414 static atomic_t stop_operation;
3415 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback, run every IPMI_TIMEOUT_TIME ms: stop rescheduling when
 * stop_operation is set (module unload), request events once per
 * IPMI_REQUEST_EV_TIME ticks, run the timeout/retry work and re-arm.
 */
3417 static void ipmi_timeout(unsigned long data)
3419 if (atomic_read(&stop_operation))
3423 if (ticks_to_req_ev == 0) {
3424 ipmi_request_event();
3425 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3428 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3430 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3434 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3435 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3437 /* FIXME - convert these to slabs. */
/* done() handler for SMI messages: decrement the leak-check counter
   (the kfree itself is elided in this excerpt). */
3438 static void free_smi_msg(struct ipmi_smi_msg *msg)
3440 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an SMI message (GFP_ATOMIC - callable from the message paths),
 * wire up its done() destructor and bump the in-use counter used by the
 * exit-time leak check.  Returns NULL on allocation failure.
 */
3444 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3446 struct ipmi_smi_msg *rv;
3447 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3449 rv->done = free_smi_msg;
3450 rv->user_data = NULL;
3451 atomic_inc(&smi_msg_inuse_count);
/* done() handler for receive messages: decrement the leak-check counter
   (the kfree itself is elided in this excerpt). */
3456 static void free_recv_msg(struct ipmi_recv_msg *msg)
3458 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a receive message (GFP_ATOMIC), set its done() destructor and
 * bump the in-use counter for the exit-time leak check.  Returns NULL on
 * allocation failure.
 */
3462 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3464 struct ipmi_recv_msg *rv;
3466 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3469 rv->done = free_recv_msg;
3470 atomic_inc(&recv_msg_inuse_count);
/* Release a receive message: drop the owning user's refcount (when one
   is attached) before the message's done() handler runs. */
3475 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3478 kref_put(&msg->user->refcount, free_user);
3482 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done() handlers for the stack-allocated messages used while
   panicking - those must never be kfree'd. */
3484 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3488 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3492 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic: when a successful Get Event
 * Receiver response arrives on the system interface, stash the event
 * receiver's slave address and LUN on the interface for send_panic_events().
 */
3493 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3495 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3496 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3497 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3498 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3500 /* A get event receiver command, save it. */
3501 intf->event_receiver = msg->msg.data[1];
3502 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic: on a successful Get Device ID
 * response, record whether the local MC is an SEL device and an event
 * generator (bits 2 and 5 of the Additional Device Support byte).
 */
3506 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3508 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3509 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3510 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3511 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3513 /* A get device id command, save if we are an event
3514 receiver or generator. */
3515 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3516 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * Emit panic information through IPMI.  First sends an "OS Critical Stop"
 * platform event on every interface (run-to-completion mode, no retries,
 * no waiting - we are panicking).  If CONFIG_IPMI_PANIC_STRING is set,
 * additionally chops the panic string into 11-byte chunks and writes them
 * as OEM SEL records, addressed to the configured event receiver, or the
 * local SEL, whichever is valid.  Uses stack-allocated smi/recv messages
 * with dummy done() handlers since the allocator cannot be trusted here.
 * NOTE(review): many argument lists (i_ipmi_request) and loop bodies are
 * elided in this excerpt - confirm details against the full file.
 */
3521 static void send_panic_events(char *str)
3523 struct kernel_ipmi_msg msg;
3525 unsigned char data[16];
3527 struct ipmi_system_interface_addr *si;
3528 struct ipmi_addr addr;
3529 struct ipmi_smi_msg smi_msg;
3530 struct ipmi_recv_msg recv_msg;
3532 si = (struct ipmi_system_interface_addr *) &addr;
3533 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3534 si->channel = IPMI_BMC_CHANNEL;
3537 /* Fill in an event telling that we have failed. */
3538 msg.netfn = 0x04; /* Sensor or Event. */
3539 msg.cmd = 2; /* Platform event command. */
3542 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3543 data[1] = 0x03; /* This is for IPMI 1.0. */
3544 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3545 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3546 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3548 /* Put a few breadcrumbs in. Hopefully later we can add more things
3549 to make the panic events more useful. */
3556 smi_msg.done = dummy_smi_done_handler;
3557 recv_msg.done = dummy_recv_done_handler;
3559 /* For every registered interface, send the event. */
3560 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3561 intf = ipmi_interfaces[i];
3562 if (IPMI_INVALID_INTERFACE(intf))
3565 /* Send the event announcing the panic. */
3566 intf->handlers->set_run_to_completion(intf->send_info, 1);
3567 i_ipmi_request(NULL,
3576 intf->channels[0].address,
3577 intf->channels[0].lun,
3578 0, 1); /* Don't retry, and don't wait. */
3581 #ifdef CONFIG_IPMI_PANIC_STRING
3582 /* On every interface, dump a bunch of OEM event holding the
3587 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3589 struct ipmi_ipmb_addr *ipmb;
3592 intf = ipmi_interfaces[i];
3593 if (IPMI_INVALID_INTERFACE(intf))
3596 /* First job here is to figure out where to send the
3597 OEM events. There's no way in IPMI to send OEM
3598 events using an event send command, so we have to
3599 find the SEL to put them in and stick them in
3602 /* Get capabilities from the get device id. */
3603 intf->local_sel_device = 0;
3604 intf->local_event_generator = 0;
3605 intf->event_receiver = 0;
3607 /* Request the device info from the local MC. */
3608 msg.netfn = IPMI_NETFN_APP_REQUEST;
3609 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* device_id_fetcher captures the synchronous reply for us. */
3612 intf->null_user_handler = device_id_fetcher;
3613 i_ipmi_request(NULL,
3622 intf->channels[0].address,
3623 intf->channels[0].lun,
3624 0, 1); /* Don't retry, and don't wait. */
3626 if (intf->local_event_generator) {
3627 /* Request the event receiver from the local MC. */
3628 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3629 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3632 intf->null_user_handler = event_receiver_fetcher;
3633 i_ipmi_request(NULL,
3642 intf->channels[0].address,
3643 intf->channels[0].lun,
3644 0, 1); /* no retry, and no wait. */
3646 intf->null_user_handler = NULL;
3648 /* Validate the event receiver. The low bit must not
3649 be 1 (it must be a valid IPMB address), it cannot
3650 be zero, and it must not be my address. */
3651 if (((intf->event_receiver & 1) == 0)
3652 && (intf->event_receiver != 0)
3653 && (intf->event_receiver != intf->channels[0].address))
3655 /* The event receiver is valid, send an IPMB
3657 ipmb = (struct ipmi_ipmb_addr *) &addr;
3658 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3659 ipmb->channel = 0; /* FIXME - is this right? */
3660 ipmb->lun = intf->event_receiver_lun;
3661 ipmb->slave_addr = intf->event_receiver;
3662 } else if (intf->local_sel_device) {
3663 /* The event receiver was not valid (or was
3664 me), but I am an SEL device, just dump it
3666 si = (struct ipmi_system_interface_addr *) &addr;
3667 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3668 si->channel = IPMI_BMC_CHANNEL;
3671 continue; /* Nowhere to send the event. */
3674 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3675 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3681 int size = strlen(p);
3687 data[2] = 0xf0; /* OEM event without timestamp. */
3688 data[3] = intf->channels[0].address;
3689 data[4] = j++; /* sequence # */
3690 /* Always give 11 bytes, so strncpy will fill
3691 it with zeroes for me. */
3692 strncpy(data+5, p, 11);
3695 i_ipmi_request(NULL,
3704 intf->channels[0].address,
3705 intf->channels[0].lun,
3706 0, 1); /* no retry, and no wait. */
3709 #endif /* CONFIG_IPMI_PANIC_STRING */
3711 #endif /* CONFIG_IPMI_PANIC_EVENT */
3713 static int has_panicked = 0;
/*
 * Panic-notifier callback: switch every interface into run-to-completion
 * (polled) mode so messages can still go out with interrupts dead, then
 * emit the panic events when CONFIG_IPMI_PANIC_EVENT is enabled.
 * NOTE(review): the early has_panicked guard and the return statement are
 * elided in this excerpt.
 */
3715 static int panic_event(struct notifier_block *this,
3716 unsigned long event,
3726 /* For every registered interface, set it to run to completion. */
3727 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3728 intf = ipmi_interfaces[i];
3729 if (IPMI_INVALID_INTERFACE(intf))
3732 intf->handlers->set_run_to_completion(intf->send_info, 1);
3735 #ifdef CONFIG_IPMI_PANIC_EVENT
3736 send_panic_events(ptr);
/* Registered on panic_notifier_list at init; priority 200 so we run
   before lower-priority panic handlers. */
3742 static struct notifier_block panic_block = {
3743 .notifier_call = panic_event,
3745 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time initialization of the message handler (idempotent via the
 * file-level 'initialized' flag, whose check is elided in this excerpt):
 * register the driver core entry, clear the interface table, create
 * /proc/ipmi, start the periodic timer and hook the panic notifier.
 */
3748 static int ipmi_init_msghandler(void)
3756 rv = driver_register(&ipmidriver);
3758 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3762 printk(KERN_INFO "ipmi message handler version "
3763 IPMI_DRIVER_VERSION "\n");
3765 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3766 ipmi_interfaces[i] = NULL;
3768 #ifdef CONFIG_PROC_FS
3769 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3770 if (!proc_ipmi_root) {
3771 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3775 proc_ipmi_root->owner = THIS_MODULE;
3776 #endif /* CONFIG_PROC_FS */
3778 setup_timer(&ipmi_timer, ipmi_timeout, 0);
3779 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3781 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: just delegates to ipmi_init_msghandler(), which
   may also be invoked earlier by in-tree users. */
3788 static __init int ipmi_init_msghandler_mod(void)
3790 ipmi_init_msghandler();
/*
 * Module teardown: unhook the panic notifier, stop the periodic timer
 * race-free (set the flag, then del_timer_sync), remove /proc/ipmi,
 * unregister the driver and finally report any leaked smi/recv messages
 * via the in-use counters.  No interfaces can still exist at this point.
 */
3794 static __exit void cleanup_ipmi(void)
3801 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3803 /* This can't be called if any interfaces exist, so no worry about
3804 shutting down the interfaces. */
3806 /* Tell the timer to stop, then wait for it to stop. This avoids
3807 problems with race conditions removing the timer here. */
3808 atomic_inc(&stop_operation);
3809 del_timer_sync(&ipmi_timer);
3811 #ifdef CONFIG_PROC_FS
3812 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3813 #endif /* CONFIG_PROC_FS */
3815 driver_unregister(&ipmidriver);
3819 /* Check for buffer leaks. */
3820 count = atomic_read(&smi_msg_inuse_count);
3822 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3824 count = atomic_read(&recv_msg_inuse_count);
3826 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3831 module_init(ipmi_init_msghandler_mod);
3832 MODULE_LICENSE("GPL");
3833 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3834 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3835 MODULE_VERSION(IPMI_DRIVER_VERSION);
3837 EXPORT_SYMBOL(ipmi_create_user);
3838 EXPORT_SYMBOL(ipmi_destroy_user);
3839 EXPORT_SYMBOL(ipmi_get_version);
3840 EXPORT_SYMBOL(ipmi_request_settime);
3841 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3842 EXPORT_SYMBOL(ipmi_register_smi);
3843 EXPORT_SYMBOL(ipmi_unregister_smi);
3844 EXPORT_SYMBOL(ipmi_register_for_cmd);
3845 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3846 EXPORT_SYMBOL(ipmi_smi_msg_received);
3847 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3848 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3849 EXPORT_SYMBOL(ipmi_addr_length);
3850 EXPORT_SYMBOL(ipmi_validate_addr);
3851 EXPORT_SYMBOL(ipmi_set_gets_events);
3852 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3853 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3854 EXPORT_SYMBOL(ipmi_set_my_address);
3855 EXPORT_SYMBOL(ipmi_get_my_address);
3856 EXPORT_SYMBOL(ipmi_set_my_LUN);
3857 EXPORT_SYMBOL(ipmi_get_my_LUN);
3858 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3859 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3860 EXPORT_SYMBOL(ipmi_free_recv_msg);