4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "39.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 static struct proc_dir_entry *proc_ipmi_root = NULL;
60 #endif /* CONFIG_PROC_FS */
62 #define MAX_EVENTS_IN_QUEUE 25
64 /* Don't let a message sit in a queue forever, always time it with at lest
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
70 * The main "user" data structure.
74 struct list_head link;
76 /* Set to "0" when the user is destroyed. */
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
85 /* The interface this user is bound to. */
88 /* Does this interface receive IPMI events? */
94 struct list_head link;
102 * This is used to form a linked lised during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
133 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
135 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
137 seq = ((msgid >> 26) & 0x3f); \
138 seqid = (msgid & 0x3fffff); \
141 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
145 unsigned char medium;
146 unsigned char protocol;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
152 /* My LUN. This should generally stay the SMS LUN, but just in
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry *next;
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
172 struct kref refcount;
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
187 #define IPMI_IPMB_NUM_SEQ 64
188 #define IPMI_MAX_CHANNELS 16
191 /* What interface number are we? */
194 struct kref refcount;
196 /* The list of upper layers that are using me. seq_lock
198 struct list_head users;
200 /* Used for wake ups at startup. */
201 wait_queue_head_t waitq;
203 struct bmc_device *bmc;
206 /* This is the lower-layer's sender routine. */
207 struct ipmi_smi_handlers *handlers;
210 #ifdef CONFIG_PROC_FS
211 /* A list of proc entries for this interface. This does not
212 need a lock, only one thread creates it and only one thread
214 spinlock_t proc_entry_lock;
215 struct ipmi_proc_entry *proc_entries;
218 /* Driver-model device for the system interface. */
219 struct device *si_dev;
221 /* A table of sequence numbers for this interface. We use the
222 sequence numbers for IPMB messages that go out of the
223 interface to match them up with their responses. A routine
224 is called periodically to time the items in this list. */
226 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
229 /* Messages that were delayed for some reason (out of memory,
230 for instance), will go in here to be processed later in a
231 periodic timer interrupt. */
232 spinlock_t waiting_msgs_lock;
233 struct list_head waiting_msgs;
235 /* The list of command receivers that are registered for commands
236 on this interface. */
237 struct mutex cmd_rcvrs_mutex;
238 struct list_head cmd_rcvrs;
240 /* Events that were queues because no one was there to receive
242 spinlock_t events_lock; /* For dealing with event stuff. */
243 struct list_head waiting_events;
244 unsigned int waiting_events_count; /* How many events in queue? */
246 /* The event receiver for my BMC, only really used at panic
247 shutdown as a place to store this. */
248 unsigned char event_receiver;
249 unsigned char event_receiver_lun;
250 unsigned char local_sel_device;
251 unsigned char local_event_generator;
253 /* A cheap hack, if this is non-null and a message to an
254 interface comes in with a NULL user, call this routine with
255 it. Note that the message will still be freed by the
256 caller. This only works on the system interface. */
257 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
259 /* When we are scanning the channels for an SMI, this will
260 tell which channel we are scanning. */
263 /* Channel information */
264 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
267 struct proc_dir_entry *proc_dir;
268 char proc_dir_name[10];
270 spinlock_t counter_lock; /* For making counters atomic. */
272 /* Commands we got that were invalid. */
273 unsigned int sent_invalid_commands;
275 /* Commands we sent to the MC. */
276 unsigned int sent_local_commands;
277 /* Responses from the MC that were delivered to a user. */
278 unsigned int handled_local_responses;
279 /* Responses from the MC that were not delivered to a user. */
280 unsigned int unhandled_local_responses;
282 /* Commands we sent out to the IPMB bus. */
283 unsigned int sent_ipmb_commands;
284 /* Commands sent on the IPMB that had errors on the SEND CMD */
285 unsigned int sent_ipmb_command_errs;
286 /* Each retransmit increments this count. */
287 unsigned int retransmitted_ipmb_commands;
288 /* When a message times out (runs out of retransmits) this is
290 unsigned int timed_out_ipmb_commands;
292 /* This is like above, but for broadcasts. Broadcasts are
293 *not* included in the above count (they are expected to
295 unsigned int timed_out_ipmb_broadcasts;
297 /* Responses I have sent to the IPMB bus. */
298 unsigned int sent_ipmb_responses;
300 /* The response was delivered to the user. */
301 unsigned int handled_ipmb_responses;
302 /* The response had invalid data in it. */
303 unsigned int invalid_ipmb_responses;
304 /* The response didn't have anyone waiting for it. */
305 unsigned int unhandled_ipmb_responses;
307 /* Commands we sent out to the IPMB bus. */
308 unsigned int sent_lan_commands;
309 /* Commands sent on the IPMB that had errors on the SEND CMD */
310 unsigned int sent_lan_command_errs;
311 /* Each retransmit increments this count. */
312 unsigned int retransmitted_lan_commands;
313 /* When a message times out (runs out of retransmits) this is
315 unsigned int timed_out_lan_commands;
317 /* Responses I have sent to the IPMB bus. */
318 unsigned int sent_lan_responses;
320 /* The response was delivered to the user. */
321 unsigned int handled_lan_responses;
322 /* The response had invalid data in it. */
323 unsigned int invalid_lan_responses;
324 /* The response didn't have anyone waiting for it. */
325 unsigned int unhandled_lan_responses;
327 /* The command was delivered to the user. */
328 unsigned int handled_commands;
329 /* The command had invalid data in it. */
330 unsigned int invalid_commands;
331 /* The command didn't have anyone waiting for it. */
332 unsigned int unhandled_commands;
334 /* Invalid data in an event. */
335 unsigned int invalid_events;
336 /* Events that were received with the proper format. */
339 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
341 /* Used to mark an interface entry that cannot be used but is not a
342 * free entry, either, primarily used at creation and deletion time so
343 * a slot doesn't get reused too quickly. */
344 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
345 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
346 || (i == IPMI_INVALID_INTERFACE_ENTRY))
349 * The driver model view of the IPMI messaging driver.
351 static struct device_driver ipmidriver = {
353 .bus = &platform_bus_type
355 static DEFINE_MUTEX(ipmidriver_mutex);
357 #define MAX_IPMI_INTERFACES 4
358 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
360 /* Directly protects the ipmi_interfaces data structure. */
361 static DEFINE_SPINLOCK(interfaces_lock);
363 /* List of watchers that want to know when smi's are added and
365 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
366 static DECLARE_RWSEM(smi_watchers_sem);
369 static void free_recv_msg_list(struct list_head *q)
371 struct ipmi_recv_msg *msg, *msg2;
373 list_for_each_entry_safe(msg, msg2, q, link) {
374 list_del(&msg->link);
375 ipmi_free_recv_msg(msg);
379 static void clean_up_interface_data(ipmi_smi_t intf)
382 struct cmd_rcvr *rcvr, *rcvr2;
383 struct list_head list;
385 free_recv_msg_list(&intf->waiting_msgs);
386 free_recv_msg_list(&intf->waiting_events);
388 /* Wholesale remove all the entries from the list in the
389 * interface and wait for RCU to know that none are in use. */
390 mutex_lock(&intf->cmd_rcvrs_mutex);
391 list_add_rcu(&list, &intf->cmd_rcvrs);
392 list_del_rcu(&intf->cmd_rcvrs);
393 mutex_unlock(&intf->cmd_rcvrs_mutex);
396 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
399 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
400 if ((intf->seq_table[i].inuse)
401 && (intf->seq_table[i].recv_msg))
403 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
408 static void intf_free(struct kref *ref)
410 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
412 clean_up_interface_data(intf);
416 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
421 down_write(&smi_watchers_sem);
422 list_add(&(watcher->link), &smi_watchers);
423 up_write(&smi_watchers_sem);
424 spin_lock_irqsave(&interfaces_lock, flags);
425 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
426 ipmi_smi_t intf = ipmi_interfaces[i];
427 if (IPMI_INVALID_INTERFACE(intf))
429 spin_unlock_irqrestore(&interfaces_lock, flags);
430 watcher->new_smi(i, intf->si_dev);
431 spin_lock_irqsave(&interfaces_lock, flags);
433 spin_unlock_irqrestore(&interfaces_lock, flags);
437 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
439 down_write(&smi_watchers_sem);
440 list_del(&(watcher->link));
441 up_write(&smi_watchers_sem);
446 call_smi_watchers(int i, struct device *dev)
448 struct ipmi_smi_watcher *w;
450 down_read(&smi_watchers_sem);
451 list_for_each_entry(w, &smi_watchers, link) {
452 if (try_module_get(w->owner)) {
454 module_put(w->owner);
457 up_read(&smi_watchers_sem);
461 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
463 if (addr1->addr_type != addr2->addr_type)
466 if (addr1->channel != addr2->channel)
469 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
470 struct ipmi_system_interface_addr *smi_addr1
471 = (struct ipmi_system_interface_addr *) addr1;
472 struct ipmi_system_interface_addr *smi_addr2
473 = (struct ipmi_system_interface_addr *) addr2;
474 return (smi_addr1->lun == smi_addr2->lun);
477 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
478 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
480 struct ipmi_ipmb_addr *ipmb_addr1
481 = (struct ipmi_ipmb_addr *) addr1;
482 struct ipmi_ipmb_addr *ipmb_addr2
483 = (struct ipmi_ipmb_addr *) addr2;
485 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
486 && (ipmb_addr1->lun == ipmb_addr2->lun));
489 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
490 struct ipmi_lan_addr *lan_addr1
491 = (struct ipmi_lan_addr *) addr1;
492 struct ipmi_lan_addr *lan_addr2
493 = (struct ipmi_lan_addr *) addr2;
495 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
496 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
497 && (lan_addr1->session_handle
498 == lan_addr2->session_handle)
499 && (lan_addr1->lun == lan_addr2->lun));
505 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
507 if (len < sizeof(struct ipmi_system_interface_addr)) {
511 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
512 if (addr->channel != IPMI_BMC_CHANNEL)
517 if ((addr->channel == IPMI_BMC_CHANNEL)
518 || (addr->channel >= IPMI_MAX_CHANNELS)
519 || (addr->channel < 0))
522 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
523 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
525 if (len < sizeof(struct ipmi_ipmb_addr)) {
531 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
532 if (len < sizeof(struct ipmi_lan_addr)) {
541 unsigned int ipmi_addr_length(int addr_type)
543 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
544 return sizeof(struct ipmi_system_interface_addr);
546 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
547 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
549 return sizeof(struct ipmi_ipmb_addr);
552 if (addr_type == IPMI_LAN_ADDR_TYPE)
553 return sizeof(struct ipmi_lan_addr);
558 static void deliver_response(struct ipmi_recv_msg *msg)
561 ipmi_smi_t intf = msg->user_msg_data;
564 /* Special handling for NULL users. */
565 if (intf->null_user_handler) {
566 intf->null_user_handler(intf, msg);
567 spin_lock_irqsave(&intf->counter_lock, flags);
568 intf->handled_local_responses++;
569 spin_unlock_irqrestore(&intf->counter_lock, flags);
571 /* No handler, so give up. */
572 spin_lock_irqsave(&intf->counter_lock, flags);
573 intf->unhandled_local_responses++;
574 spin_unlock_irqrestore(&intf->counter_lock, flags);
576 ipmi_free_recv_msg(msg);
578 ipmi_user_t user = msg->user;
579 user->handler->ipmi_recv_hndl(msg, user->handler_data);
583 /* Find the next sequence number not being used and add the given
584 message with the given timeout to the sequence table. This must be
585 called with the interface's seq_lock held. */
586 static int intf_next_seq(ipmi_smi_t intf,
587 struct ipmi_recv_msg *recv_msg,
588 unsigned long timeout,
597 for (i = intf->curr_seq;
598 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
599 i = (i+1)%IPMI_IPMB_NUM_SEQ)
601 if (!intf->seq_table[i].inuse)
605 if (!intf->seq_table[i].inuse) {
606 intf->seq_table[i].recv_msg = recv_msg;
608 /* Start with the maximum timeout, when the send response
609 comes in we will start the real timer. */
610 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
611 intf->seq_table[i].orig_timeout = timeout;
612 intf->seq_table[i].retries_left = retries;
613 intf->seq_table[i].broadcast = broadcast;
614 intf->seq_table[i].inuse = 1;
615 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
617 *seqid = intf->seq_table[i].seqid;
618 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
626 /* Return the receive message for the given sequence number and
627 release the sequence number so it can be reused. Some other data
628 is passed in to be sure the message matches up correctly (to help
629 guard against message coming in after their timeout and the
630 sequence number being reused). */
631 static int intf_find_seq(ipmi_smi_t intf,
636 struct ipmi_addr *addr,
637 struct ipmi_recv_msg **recv_msg)
642 if (seq >= IPMI_IPMB_NUM_SEQ)
645 spin_lock_irqsave(&(intf->seq_lock), flags);
646 if (intf->seq_table[seq].inuse) {
647 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
649 if ((msg->addr.channel == channel)
650 && (msg->msg.cmd == cmd)
651 && (msg->msg.netfn == netfn)
652 && (ipmi_addr_equal(addr, &(msg->addr))))
655 intf->seq_table[seq].inuse = 0;
659 spin_unlock_irqrestore(&(intf->seq_lock), flags);
665 /* Start the timer for a specific sequence table entry. */
666 static int intf_start_seq_timer(ipmi_smi_t intf,
675 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
677 spin_lock_irqsave(&(intf->seq_lock), flags);
678 /* We do this verification because the user can be deleted
679 while a message is outstanding. */
680 if ((intf->seq_table[seq].inuse)
681 && (intf->seq_table[seq].seqid == seqid))
683 struct seq_table *ent = &(intf->seq_table[seq]);
684 ent->timeout = ent->orig_timeout;
687 spin_unlock_irqrestore(&(intf->seq_lock), flags);
692 /* Got an error for the send message for a specific sequence number. */
693 static int intf_err_seq(ipmi_smi_t intf,
701 struct ipmi_recv_msg *msg = NULL;
704 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
706 spin_lock_irqsave(&(intf->seq_lock), flags);
707 /* We do this verification because the user can be deleted
708 while a message is outstanding. */
709 if ((intf->seq_table[seq].inuse)
710 && (intf->seq_table[seq].seqid == seqid))
712 struct seq_table *ent = &(intf->seq_table[seq]);
718 spin_unlock_irqrestore(&(intf->seq_lock), flags);
721 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
722 msg->msg_data[0] = err;
723 msg->msg.netfn |= 1; /* Convert to a response. */
724 msg->msg.data_len = 1;
725 msg->msg.data = msg->msg_data;
726 deliver_response(msg);
733 int ipmi_create_user(unsigned int if_num,
734 struct ipmi_user_hndl *handler,
739 ipmi_user_t new_user;
743 /* There is no module usecount here, because it's not
744 required. Since this can only be used by and called from
745 other modules, they will implicitly use this module, and
746 thus this can't be removed unless the other modules are
752 /* Make sure the driver is actually initialized, this handles
753 problems with initialization order. */
755 rv = ipmi_init_msghandler();
759 /* The init code doesn't return an error if it was turned
760 off, but it won't initialize. Check that. */
765 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
769 spin_lock_irqsave(&interfaces_lock, flags);
770 intf = ipmi_interfaces[if_num];
771 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
772 spin_unlock_irqrestore(&interfaces_lock, flags);
777 /* Note that each existing user holds a refcount to the interface. */
778 kref_get(&intf->refcount);
779 spin_unlock_irqrestore(&interfaces_lock, flags);
781 kref_init(&new_user->refcount);
782 new_user->handler = handler;
783 new_user->handler_data = handler_data;
784 new_user->intf = intf;
785 new_user->gets_events = 0;
787 if (!try_module_get(intf->handlers->owner)) {
792 if (intf->handlers->inc_usecount) {
793 rv = intf->handlers->inc_usecount(intf->send_info);
795 module_put(intf->handlers->owner);
801 spin_lock_irqsave(&intf->seq_lock, flags);
802 list_add_rcu(&new_user->link, &intf->users);
803 spin_unlock_irqrestore(&intf->seq_lock, flags);
808 kref_put(&intf->refcount, intf_free);
814 static void free_user(struct kref *ref)
816 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
820 int ipmi_destroy_user(ipmi_user_t user)
822 ipmi_smi_t intf = user->intf;
825 struct cmd_rcvr *rcvr;
826 struct cmd_rcvr *rcvrs = NULL;
830 /* Remove the user from the interface's sequence table. */
831 spin_lock_irqsave(&intf->seq_lock, flags);
832 list_del_rcu(&user->link);
834 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
835 if (intf->seq_table[i].inuse
836 && (intf->seq_table[i].recv_msg->user == user))
838 intf->seq_table[i].inuse = 0;
841 spin_unlock_irqrestore(&intf->seq_lock, flags);
844 * Remove the user from the command receiver's table. First
845 * we build a list of everything (not using the standard link,
846 * since other things may be using it till we do
847 * synchronize_rcu()) then free everything in that list.
849 mutex_lock(&intf->cmd_rcvrs_mutex);
850 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
851 if (rcvr->user == user) {
852 list_del_rcu(&rcvr->link);
857 mutex_unlock(&intf->cmd_rcvrs_mutex);
865 module_put(intf->handlers->owner);
866 if (intf->handlers->dec_usecount)
867 intf->handlers->dec_usecount(intf->send_info);
869 kref_put(&intf->refcount, intf_free);
871 kref_put(&user->refcount, free_user);
876 void ipmi_get_version(ipmi_user_t user,
877 unsigned char *major,
878 unsigned char *minor)
880 *major = ipmi_version_major(&user->intf->bmc->id);
881 *minor = ipmi_version_minor(&user->intf->bmc->id);
884 int ipmi_set_my_address(ipmi_user_t user,
885 unsigned int channel,
886 unsigned char address)
888 if (channel >= IPMI_MAX_CHANNELS)
890 user->intf->channels[channel].address = address;
894 int ipmi_get_my_address(ipmi_user_t user,
895 unsigned int channel,
896 unsigned char *address)
898 if (channel >= IPMI_MAX_CHANNELS)
900 *address = user->intf->channels[channel].address;
904 int ipmi_set_my_LUN(ipmi_user_t user,
905 unsigned int channel,
908 if (channel >= IPMI_MAX_CHANNELS)
910 user->intf->channels[channel].lun = LUN & 0x3;
914 int ipmi_get_my_LUN(ipmi_user_t user,
915 unsigned int channel,
916 unsigned char *address)
918 if (channel >= IPMI_MAX_CHANNELS)
920 *address = user->intf->channels[channel].lun;
924 int ipmi_set_gets_events(ipmi_user_t user, int val)
927 ipmi_smi_t intf = user->intf;
928 struct ipmi_recv_msg *msg, *msg2;
929 struct list_head msgs;
931 INIT_LIST_HEAD(&msgs);
933 spin_lock_irqsave(&intf->events_lock, flags);
934 user->gets_events = val;
937 /* Deliver any queued events. */
938 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
939 list_move_tail(&msg->link, &msgs);
940 intf->waiting_events_count = 0;
943 /* Hold the events lock while doing this to preserve order. */
944 list_for_each_entry_safe(msg, msg2, &msgs, link) {
946 kref_get(&user->refcount);
947 deliver_response(msg);
950 spin_unlock_irqrestore(&intf->events_lock, flags);
955 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
960 struct cmd_rcvr *rcvr;
962 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
963 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
964 && (rcvr->chans & (1 << chan)))
970 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
975 struct cmd_rcvr *rcvr;
977 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
978 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
979 && (rcvr->chans & chans))
985 int ipmi_register_for_cmd(ipmi_user_t user,
990 ipmi_smi_t intf = user->intf;
991 struct cmd_rcvr *rcvr;
995 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1000 rcvr->chans = chans;
1003 mutex_lock(&intf->cmd_rcvrs_mutex);
1004 /* Make sure the command/netfn is not already registered. */
1005 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1010 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1013 mutex_unlock(&intf->cmd_rcvrs_mutex);
1020 int ipmi_unregister_for_cmd(ipmi_user_t user,
1021 unsigned char netfn,
1025 ipmi_smi_t intf = user->intf;
1026 struct cmd_rcvr *rcvr;
1027 struct cmd_rcvr *rcvrs = NULL;
1028 int i, rv = -ENOENT;
1030 mutex_lock(&intf->cmd_rcvrs_mutex);
1031 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1032 if (((1 << i) & chans) == 0)
1034 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1037 if (rcvr->user == user) {
1039 rcvr->chans &= ~chans;
1040 if (rcvr->chans == 0) {
1041 list_del_rcu(&rcvr->link);
1047 mutex_unlock(&intf->cmd_rcvrs_mutex);
1057 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1059 ipmi_smi_t intf = user->intf;
1060 intf->handlers->set_run_to_completion(intf->send_info, val);
/* Compute the IPMB 2's-complement checksum of @size bytes at @data:
 * the value such that (sum of bytes + checksum) mod 256 == 0. */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
        unsigned char csum = 0;

        for (; size > 0; size--, data++)
                csum += *data;

        return -csum;
}
1074 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1075 struct kernel_ipmi_msg *msg,
1076 struct ipmi_ipmb_addr *ipmb_addr,
1078 unsigned char ipmb_seq,
1080 unsigned char source_address,
1081 unsigned char source_lun)
1085 /* Format the IPMB header data. */
1086 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1087 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1088 smi_msg->data[2] = ipmb_addr->channel;
1090 smi_msg->data[3] = 0;
1091 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1092 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1093 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1094 smi_msg->data[i+6] = source_address;
1095 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1096 smi_msg->data[i+8] = msg->cmd;
1098 /* Now tack on the data to the message. */
1099 if (msg->data_len > 0)
1100 memcpy(&(smi_msg->data[i+9]), msg->data,
1102 smi_msg->data_size = msg->data_len + 9;
1104 /* Now calculate the checksum and tack it on. */
1105 smi_msg->data[i+smi_msg->data_size]
1106 = ipmb_checksum(&(smi_msg->data[i+6]),
1107 smi_msg->data_size-6);
1109 /* Add on the checksum size and the offset from the
1111 smi_msg->data_size += 1 + i;
1113 smi_msg->msgid = msgid;
1116 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1117 struct kernel_ipmi_msg *msg,
1118 struct ipmi_lan_addr *lan_addr,
1120 unsigned char ipmb_seq,
1121 unsigned char source_lun)
1123 /* Format the IPMB header data. */
1124 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1125 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1126 smi_msg->data[2] = lan_addr->channel;
1127 smi_msg->data[3] = lan_addr->session_handle;
1128 smi_msg->data[4] = lan_addr->remote_SWID;
1129 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1130 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1131 smi_msg->data[7] = lan_addr->local_SWID;
1132 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1133 smi_msg->data[9] = msg->cmd;
1135 /* Now tack on the data to the message. */
1136 if (msg->data_len > 0)
1137 memcpy(&(smi_msg->data[10]), msg->data,
1139 smi_msg->data_size = msg->data_len + 10;
1141 /* Now calculate the checksum and tack it on. */
1142 smi_msg->data[smi_msg->data_size]
1143 = ipmb_checksum(&(smi_msg->data[7]),
1144 smi_msg->data_size-7);
1146 /* Add on the checksum size and the offset from the
1148 smi_msg->data_size += 1;
1150 smi_msg->msgid = msgid;
1153 /* Separate from ipmi_request so that the user does not have to be
1154 supplied in certain circumstances (mainly at panic time). If
1155 messages are supplied, they will be freed, even if an error
1157 static int i_ipmi_request(ipmi_user_t user,
1159 struct ipmi_addr *addr,
1161 struct kernel_ipmi_msg *msg,
1162 void *user_msg_data,
1164 struct ipmi_recv_msg *supplied_recv,
1166 unsigned char source_address,
1167 unsigned char source_lun,
1169 unsigned int retry_time_ms)
1172 struct ipmi_smi_msg *smi_msg;
1173 struct ipmi_recv_msg *recv_msg;
1174 unsigned long flags;
1177 if (supplied_recv) {
1178 recv_msg = supplied_recv;
1180 recv_msg = ipmi_alloc_recv_msg();
1181 if (recv_msg == NULL) {
1185 recv_msg->user_msg_data = user_msg_data;
1188 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1190 smi_msg = ipmi_alloc_smi_msg();
1191 if (smi_msg == NULL) {
1192 ipmi_free_recv_msg(recv_msg);
1197 recv_msg->user = user;
1199 kref_get(&user->refcount);
1200 recv_msg->msgid = msgid;
1201 /* Store the message to send in the receive message so timeout
1202 responses can get the proper response data. */
1203 recv_msg->msg = *msg;
1205 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1206 struct ipmi_system_interface_addr *smi_addr;
1208 if (msg->netfn & 1) {
1209 /* Responses are not allowed to the SMI. */
1214 smi_addr = (struct ipmi_system_interface_addr *) addr;
1215 if (smi_addr->lun > 3) {
1216 spin_lock_irqsave(&intf->counter_lock, flags);
1217 intf->sent_invalid_commands++;
1218 spin_unlock_irqrestore(&intf->counter_lock, flags);
1223 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1225 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1226 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1227 || (msg->cmd == IPMI_GET_MSG_CMD)
1228 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1230 /* We don't let the user do these, since we manage
1231 the sequence numbers. */
1232 spin_lock_irqsave(&intf->counter_lock, flags);
1233 intf->sent_invalid_commands++;
1234 spin_unlock_irqrestore(&intf->counter_lock, flags);
1239 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1240 spin_lock_irqsave(&intf->counter_lock, flags);
1241 intf->sent_invalid_commands++;
1242 spin_unlock_irqrestore(&intf->counter_lock, flags);
1247 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1248 smi_msg->data[1] = msg->cmd;
1249 smi_msg->msgid = msgid;
1250 smi_msg->user_data = recv_msg;
1251 if (msg->data_len > 0)
1252 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1253 smi_msg->data_size = msg->data_len + 2;
1254 spin_lock_irqsave(&intf->counter_lock, flags);
1255 intf->sent_local_commands++;
1256 spin_unlock_irqrestore(&intf->counter_lock, flags);
1257 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1258 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1260 struct ipmi_ipmb_addr *ipmb_addr;
1261 unsigned char ipmb_seq;
1265 if (addr->channel >= IPMI_MAX_CHANNELS) {
1266 spin_lock_irqsave(&intf->counter_lock, flags);
1267 intf->sent_invalid_commands++;
1268 spin_unlock_irqrestore(&intf->counter_lock, flags);
1273 if (intf->channels[addr->channel].medium
1274 != IPMI_CHANNEL_MEDIUM_IPMB)
1276 spin_lock_irqsave(&intf->counter_lock, flags);
1277 intf->sent_invalid_commands++;
1278 spin_unlock_irqrestore(&intf->counter_lock, flags);
1284 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1285 retries = 0; /* Don't retry broadcasts. */
1289 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1290 /* Broadcasts add a zero at the beginning of the
1291 message, but otherwise is the same as an IPMB
1293 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1298 /* Default to 1 second retries. */
1299 if (retry_time_ms == 0)
1300 retry_time_ms = 1000;
1302 /* 9 for the header and 1 for the checksum, plus
1303 possibly one for the broadcast. */
1304 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1305 spin_lock_irqsave(&intf->counter_lock, flags);
1306 intf->sent_invalid_commands++;
1307 spin_unlock_irqrestore(&intf->counter_lock, flags);
1312 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1313 if (ipmb_addr->lun > 3) {
1314 spin_lock_irqsave(&intf->counter_lock, flags);
1315 intf->sent_invalid_commands++;
1316 spin_unlock_irqrestore(&intf->counter_lock, flags);
1321 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1323 if (recv_msg->msg.netfn & 0x1) {
1324 /* It's a response, so use the user's sequence
1326 spin_lock_irqsave(&intf->counter_lock, flags);
1327 intf->sent_ipmb_responses++;
1328 spin_unlock_irqrestore(&intf->counter_lock, flags);
1329 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1331 source_address, source_lun);
1333 /* Save the receive message so we can use it
1334 to deliver the response. */
1335 smi_msg->user_data = recv_msg;
1337 /* It's a command, so get a sequence for it. */
1339 spin_lock_irqsave(&(intf->seq_lock), flags);
1341 spin_lock(&intf->counter_lock);
1342 intf->sent_ipmb_commands++;
1343 spin_unlock(&intf->counter_lock);
1345 /* Create a sequence number with a 1 second
1346 timeout and 4 retries. */
1347 rv = intf_next_seq(intf,
1355 /* We have used up all the sequence numbers,
1356 probably, so abort. */
1357 spin_unlock_irqrestore(&(intf->seq_lock),
1362 /* Store the sequence number in the message,
1363 so that when the send message response
1364 comes back we can start the timer. */
1365 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1366 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1367 ipmb_seq, broadcast,
1368 source_address, source_lun);
1370 /* Copy the message into the recv message data, so we
1371 can retransmit it later if necessary. */
1372 memcpy(recv_msg->msg_data, smi_msg->data,
1373 smi_msg->data_size);
1374 recv_msg->msg.data = recv_msg->msg_data;
1375 recv_msg->msg.data_len = smi_msg->data_size;
1377 /* We don't unlock until here, because we need
1378 to copy the completed message into the
1379 recv_msg before we release the lock.
1380 Otherwise, race conditions may bite us. I
1381 know that's pretty paranoid, but I prefer
1383 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1385 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1386 struct ipmi_lan_addr *lan_addr;
1387 unsigned char ipmb_seq;
1390 if (addr->channel >= IPMI_MAX_CHANNELS) {
1391 spin_lock_irqsave(&intf->counter_lock, flags);
1392 intf->sent_invalid_commands++;
1393 spin_unlock_irqrestore(&intf->counter_lock, flags);
1398 if ((intf->channels[addr->channel].medium
1399 != IPMI_CHANNEL_MEDIUM_8023LAN)
1400 && (intf->channels[addr->channel].medium
1401 != IPMI_CHANNEL_MEDIUM_ASYNC))
1403 spin_lock_irqsave(&intf->counter_lock, flags);
1404 intf->sent_invalid_commands++;
1405 spin_unlock_irqrestore(&intf->counter_lock, flags);
1412 /* Default to 1 second retries. */
1413 if (retry_time_ms == 0)
1414 retry_time_ms = 1000;
1416 /* 11 for the header and 1 for the checksum. */
1417 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1418 spin_lock_irqsave(&intf->counter_lock, flags);
1419 intf->sent_invalid_commands++;
1420 spin_unlock_irqrestore(&intf->counter_lock, flags);
1425 lan_addr = (struct ipmi_lan_addr *) addr;
1426 if (lan_addr->lun > 3) {
1427 spin_lock_irqsave(&intf->counter_lock, flags);
1428 intf->sent_invalid_commands++;
1429 spin_unlock_irqrestore(&intf->counter_lock, flags);
1434 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1436 if (recv_msg->msg.netfn & 0x1) {
1437 /* It's a response, so use the user's sequence
1439 spin_lock_irqsave(&intf->counter_lock, flags);
1440 intf->sent_lan_responses++;
1441 spin_unlock_irqrestore(&intf->counter_lock, flags);
1442 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1445 /* Save the receive message so we can use it
1446 to deliver the response. */
1447 smi_msg->user_data = recv_msg;
1449 /* It's a command, so get a sequence for it. */
1451 spin_lock_irqsave(&(intf->seq_lock), flags);
1453 spin_lock(&intf->counter_lock);
1454 intf->sent_lan_commands++;
1455 spin_unlock(&intf->counter_lock);
1457 /* Create a sequence number with a 1 second
1458 timeout and 4 retries. */
1459 rv = intf_next_seq(intf,
1467 /* We have used up all the sequence numbers,
1468 probably, so abort. */
1469 spin_unlock_irqrestore(&(intf->seq_lock),
1474 /* Store the sequence number in the message,
1475 so that when the send message response
1476 comes back we can start the timer. */
1477 format_lan_msg(smi_msg, msg, lan_addr,
1478 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1479 ipmb_seq, source_lun);
1481 /* Copy the message into the recv message data, so we
1482 can retransmit it later if necessary. */
1483 memcpy(recv_msg->msg_data, smi_msg->data,
1484 smi_msg->data_size);
1485 recv_msg->msg.data = recv_msg->msg_data;
1486 recv_msg->msg.data_len = smi_msg->data_size;
1488 /* We don't unlock until here, because we need
1489 to copy the completed message into the
1490 recv_msg before we release the lock.
1491 Otherwise, race conditions may bite us. I
1492 know that's pretty paranoid, but I prefer
1494 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1497 /* Unknown address type. */
1498 spin_lock_irqsave(&intf->counter_lock, flags);
1499 intf->sent_invalid_commands++;
1500 spin_unlock_irqrestore(&intf->counter_lock, flags);
1508 for (m = 0; m < smi_msg->data_size; m++)
1509 printk(" %2.2x", smi_msg->data[m]);
1513 intf->handlers->sender(intf->send_info, smi_msg, priority);
1518 ipmi_free_smi_msg(smi_msg);
1519 ipmi_free_recv_msg(recv_msg);
/* Validate the channel in @addr and look up the per-channel slave
   address and LUN configured on the interface.  NOTE(review): this
   extract is missing lines (the *lun parameter declaration and the
   return statements are elided); verify against the full file. */
1523 static int check_addr(ipmi_smi_t intf,
1524 struct ipmi_addr *addr,
1525 unsigned char *saddr,
/* Out-of-range channel is rejected (error return elided here). */
1528 if (addr->channel >= IPMI_MAX_CHANNELS)
1530 *lun = intf->channels[addr->channel].lun;
1531 *saddr = intf->channels[addr->channel].address;
/* Public entry point: send an IPMI request with caller-supplied retry
   count/time.  Validates the address via check_addr() and forwards to
   the internal i_ipmi_request() worker.  NOTE(review): several
   parameter and argument lines are elided in this extract. */
1535 int ipmi_request_settime(ipmi_user_t user,
1536 struct ipmi_addr *addr,
1538 struct kernel_ipmi_msg *msg,
1539 void *user_msg_data,
1542 unsigned int retry_time_ms)
1544 unsigned char saddr, lun;
/* Resolve channel-specific source address and LUN first. */
1549 rv = check_addr(user->intf, addr, &saddr, &lun);
1552 return i_ipmi_request(user,
/* Public entry point: like ipmi_request_settime(), but the caller
   supplies pre-allocated smi/recv message buffers so no allocation is
   needed on the send path.  NOTE(review): lines elided in extract. */
1566 int ipmi_request_supply_msgs(ipmi_user_t user,
1567 struct ipmi_addr *addr,
1569 struct kernel_ipmi_msg *msg,
1570 void *user_msg_data,
1572 struct ipmi_recv_msg *supplied_recv,
1575 unsigned char saddr, lun;
1580 rv = check_addr(user->intf, addr, &saddr, &lun);
1583 return i_ipmi_request(user,
/* /proc read handler: emits the IPMB slave address of every channel as
   a space-separated hex list terminated by a newline. */
1597 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1598 int count, int *eof, void *data)
1600 char *out = (char *) page;
1601 ipmi_smi_t intf = data;
/* rv accumulates bytes written; loop always runs at least once, so
   the rv-1 index below is safe. */
1605 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1606 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1607 out[rv-1] = '\n'; /* Replace the final space with a newline */
/* /proc read handler: prints the BMC's IPMI version as "major.minor". */
1613 static int version_file_read_proc(char *page, char **start, off_t off,
1614 int count, int *eof, void *data)
1616 char *out = (char *) page;
1617 ipmi_smi_t intf = data;
1619 return sprintf(out, "%d.%d\n",
1620 ipmi_version_major(&intf->bmc->id),
1621 ipmi_version_minor(&intf->bmc->id));
/* /proc read handler: dumps every interface statistics counter, one
   "name: value" line each.  The counters are read without taking
   counter_lock; a torn read just yields a slightly stale number. */
1624 static int stat_file_read_proc(char *page, char **start, off_t off,
1625 int count, int *eof, void *data)
1627 char *out = (char *) page;
1628 ipmi_smi_t intf = data;
1630 out += sprintf(out, "sent_invalid_commands: %d\n",
1631 intf->sent_invalid_commands);
1632 out += sprintf(out, "sent_local_commands: %d\n",
1633 intf->sent_local_commands);
1634 out += sprintf(out, "handled_local_responses: %d\n",
1635 intf->handled_local_responses);
1636 out += sprintf(out, "unhandled_local_responses: %d\n",
1637 intf->unhandled_local_responses);
1638 out += sprintf(out, "sent_ipmb_commands: %d\n",
1639 intf->sent_ipmb_commands);
1640 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1641 intf->sent_ipmb_command_errs);
1642 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1643 intf->retransmitted_ipmb_commands);
1644 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1645 intf->timed_out_ipmb_commands);
1646 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1647 intf->timed_out_ipmb_broadcasts);
1648 out += sprintf(out, "sent_ipmb_responses: %d\n",
1649 intf->sent_ipmb_responses);
1650 out += sprintf(out, "handled_ipmb_responses: %d\n",
1651 intf->handled_ipmb_responses);
1652 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1653 intf->invalid_ipmb_responses);
1654 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1655 intf->unhandled_ipmb_responses);
1656 out += sprintf(out, "sent_lan_commands: %d\n",
1657 intf->sent_lan_commands);
1658 out += sprintf(out, "sent_lan_command_errs: %d\n",
1659 intf->sent_lan_command_errs);
1660 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1661 intf->retransmitted_lan_commands);
1662 out += sprintf(out, "timed_out_lan_commands: %d\n",
1663 intf->timed_out_lan_commands);
1664 out += sprintf(out, "sent_lan_responses: %d\n",
1665 intf->sent_lan_responses);
1666 out += sprintf(out, "handled_lan_responses: %d\n",
1667 intf->handled_lan_responses);
1668 out += sprintf(out, "invalid_lan_responses: %d\n",
1669 intf->invalid_lan_responses);
1670 out += sprintf(out, "unhandled_lan_responses: %d\n",
1671 intf->unhandled_lan_responses);
1672 out += sprintf(out, "handled_commands: %d\n",
1673 intf->handled_commands);
1674 out += sprintf(out, "invalid_commands: %d\n",
1675 intf->invalid_commands);
1676 out += sprintf(out, "unhandled_commands: %d\n",
1677 intf->unhandled_commands);
1678 out += sprintf(out, "invalid_events: %d\n",
1679 intf->invalid_events);
1680 out += sprintf(out, "events: %d\n",
/* Return the number of bytes written into the page. */
1683 return (out - ((char *) page));
/* Create a /proc file under the interface's per-SMI directory and
   record it on smi->proc_entries so remove_proc_entries() can tear it
   down later.  The name is duplicated because proc only keeps a
   pointer.  NOTE(review): error-path lines are elided in this
   extract. */
1686 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1687 read_proc_t *read_proc, write_proc_t *write_proc,
1688 void *data, struct module *owner)
1691 #ifdef CONFIG_PROC_FS
1692 struct proc_dir_entry *file;
1693 struct ipmi_proc_entry *entry;
1695 /* Create a list element. */
1696 entry = kmalloc(sizeof(*entry), GFP_KERNEL)
1699 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1704 strcpy(entry->name, name);
1706 file = create_proc_entry(name, 0, smi->proc_dir);
1714 file->read_proc = read_proc;
1715 file->write_proc = write_proc;
1716 file->owner = owner;
1718 spin_lock(&smi->proc_entry_lock);
1719 /* Stick it on the list. */
1720 entry->next = smi->proc_entries;
1721 smi->proc_entries = entry;
1722 spin_unlock(&smi->proc_entry_lock);
1724 #endif /* CONFIG_PROC_FS */
/* Create the per-interface /proc/ipmi/<num>/ directory and its
   "stats", "ipmb" and "version" files.  @num becomes the directory
   name. */
1729 static int add_proc_entries(ipmi_smi_t smi, int num)
1733 #ifdef CONFIG_PROC_FS
1734 sprintf(smi->proc_dir_name, "%d", num);
1735 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1739 smi->proc_dir->owner = THIS_MODULE;
1743 rv = ipmi_smi_add_proc_entry(smi, "stats",
1744 stat_file_read_proc, NULL,
1748 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1749 ipmb_file_read_proc, NULL,
1753 rv = ipmi_smi_add_proc_entry(smi, "version",
1754 version_file_read_proc, NULL,
1756 #endif /* CONFIG_PROC_FS */
/* Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry()
   created: pop each tracked entry off the list, remove its proc file,
   then remove the per-interface directory itself. */
1761 static void remove_proc_entries(ipmi_smi_t smi)
1763 #ifdef CONFIG_PROC_FS
1764 struct ipmi_proc_entry *entry;
1766 spin_lock(&smi->proc_entry_lock);
1767 while (smi->proc_entries) {
1768 entry = smi->proc_entries;
1769 smi->proc_entries = entry->next;
1771 remove_proc_entry(entry->name, smi->proc_dir);
1775 spin_unlock(&smi->proc_entry_lock);
1776 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1777 #endif /* CONFIG_PROC_FS */
/* Match callback for driver_find_device(): nonzero when the bmc_device
   bound to @dev has the 16-byte GUID passed in @data. */
1780 static int __find_bmc_guid(struct device *dev, void *data)
1782 unsigned char *id = data;
1783 struct bmc_device *bmc = dev_get_drvdata(dev);
1784 return memcmp(bmc->guid, id, 16) == 0;
/* Find an already-registered bmc_device by GUID, or NULL-equivalent
   path if none matches (the not-found branch is elided here). */
1787 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1788 unsigned char *guid)
1792 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1794 return dev_get_drvdata(dev);
/* Search key for matching a BMC by product id + device id. */
1799 struct prod_dev_id {
1800 unsigned int product_id;
1801 unsigned char device_id;
1804 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1806 struct prod_dev_id *id = data;
1807 struct bmc_device *bmc = dev_get_drvdata(dev);
1809 return (bmc->id.product_id == id->product_id
1810 && bmc->id.product_id == id->product_id
1811 && bmc->id.device_id == id->device_id);
/* Find an already-registered bmc_device by product id + device id
   pair (used when the BMC has no GUID).  Builds the on-stack search
   key and delegates to driver_find_device(). */
1814 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1815 struct device_driver *drv,
1816 unsigned char product_id, unsigned char device_id)
1818 struct prod_dev_id id = {
1819 .product_id = product_id,
1820 .device_id = device_id,
1824 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1826 return dev_get_drvdata(dev);
/* Sysfs show: the BMC's device id as an unsigned decimal. */
1831 static ssize_t device_id_show(struct device *dev,
1832 struct device_attribute *attr,
1835 struct bmc_device *bmc = dev_get_drvdata(dev);
1837 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1840 static ssize_t provides_dev_sdrs_show(struct device *dev,
1841 struct device_attribute *attr,
1844 struct bmc_device *bmc = dev_get_drvdata(dev);
1846 return snprintf(buf, 10, "%u\n",
1847 bmc->id.device_revision && 0x80 >> 7);
1850 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1853 struct bmc_device *bmc = dev_get_drvdata(dev);
1855 return snprintf(buf, 20, "%u\n",
1856 bmc->id.device_revision && 0x0F);
/* Sysfs show: firmware revision as "major.minor" — minor is printed in
   hex because it is a BCD field in the IPMI response. */
1859 static ssize_t firmware_rev_show(struct device *dev,
1860 struct device_attribute *attr,
1863 struct bmc_device *bmc = dev_get_drvdata(dev);
1865 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1866 bmc->id.firmware_revision_2);
/* Sysfs show: supported IPMI specification version, "major.minor". */
1869 static ssize_t ipmi_version_show(struct device *dev,
1870 struct device_attribute *attr,
1873 struct bmc_device *bmc = dev_get_drvdata(dev);
1875 return snprintf(buf, 20, "%u.%u\n",
1876 ipmi_version_major(&bmc->id),
1877 ipmi_version_minor(&bmc->id));
/* Sysfs show: the "additional device support" bitmask from the Get
   Device ID response, printed as hex. */
1880 static ssize_t add_dev_support_show(struct device *dev,
1881 struct device_attribute *attr,
1884 struct bmc_device *bmc = dev_get_drvdata(dev);
1886 return snprintf(buf, 10, "0x%02x\n",
1887 bmc->id.additional_device_support);
/* Sysfs show: IANA manufacturer id, zero-padded 6-digit hex. */
1890 static ssize_t manufacturer_id_show(struct device *dev,
1891 struct device_attribute *attr,
1894 struct bmc_device *bmc = dev_get_drvdata(dev);
1896 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/* Sysfs show: product id, zero-padded 4-digit hex. */
1899 static ssize_t product_id_show(struct device *dev,
1900 struct device_attribute *attr,
1903 struct bmc_device *bmc = dev_get_drvdata(dev);
1905 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/* Sysfs show: the four auxiliary firmware revision bytes, printed
   most-significant first (hence the reversed index order). */
1908 static ssize_t aux_firmware_rev_show(struct device *dev,
1909 struct device_attribute *attr,
1912 struct bmc_device *bmc = dev_get_drvdata(dev);
1914 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1915 bmc->id.aux_firmware_revision[3],
1916 bmc->id.aux_firmware_revision[2],
1917 bmc->id.aux_firmware_revision[1],
1918 bmc->id.aux_firmware_revision[0]);
/* Sysfs show: the BMC GUID.  NOTE(review): only guid[0] and guid[8]
   (two single bytes of the 16-byte GUID) are formatted here, so most
   of the GUID is never printed — looks like the intent was to print
   two 64-bit halves; confirm against the full file/spec. */
1921 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1924 struct bmc_device *bmc = dev_get_drvdata(dev);
1926 return snprintf(buf, 100, "%Lx%Lx\n",
1927 (long long) bmc->guid[0],
1928 (long long) bmc->guid[8]);
/* Remove every sysfs attribute file created by create_files(), in the
   same order.  The aux-firmware (and, in elided lines, the GUID) file
   only exists when the corresponding data was present, so those are
   removed conditionally. */
1931 static void remove_files(struct bmc_device *bmc)
1933 device_remove_file(&bmc->dev->dev,
1934 &bmc->device_id_attr);
1935 device_remove_file(&bmc->dev->dev,
1936 &bmc->provides_dev_sdrs_attr);
1937 device_remove_file(&bmc->dev->dev,
1938 &bmc->revision_attr);
1939 device_remove_file(&bmc->dev->dev,
1940 &bmc->firmware_rev_attr);
1941 device_remove_file(&bmc->dev->dev,
1942 &bmc->version_attr);
1943 device_remove_file(&bmc->dev->dev,
1944 &bmc->add_dev_support_attr);
1945 device_remove_file(&bmc->dev->dev,
1946 &bmc->manufacturer_id_attr);
1947 device_remove_file(&bmc->dev->dev,
1948 &bmc->product_id_attr);
1950 if (bmc->id.aux_firmware_revision_set)
1951 device_remove_file(&bmc->dev->dev,
1952 &bmc->aux_firmware_rev_attr);
1954 device_remove_file(&bmc->dev->dev,
/* kref release callback: runs when the last reference to a bmc_device
   is dropped; unregisters the platform device (which frees it). */
1959 cleanup_bmc_device(struct kref *ref)
1961 struct bmc_device *bmc;
1963 bmc = container_of(ref, struct bmc_device, refcount);
1966 platform_device_unregister(bmc->dev);
/* Undo ipmi_bmc_register(): remove the interface<->bmc sysfs symlinks
   and drop the interface's reference on the bmc_device (the kref_put
   is done under ipmidriver_mutex to serialize with registration). */
1970 static void ipmi_bmc_unregister(ipmi_smi_t intf)
1972 struct bmc_device *bmc = intf->bmc;
1974 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1975 if (intf->my_dev_name) {
1976 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1977 kfree(intf->my_dev_name);
1978 intf->my_dev_name = NULL;
1981 mutex_lock(&ipmidriver_mutex);
1982 kref_put(&bmc->refcount, cleanup_bmc_device);
1983 mutex_unlock(&ipmidriver_mutex);
/* Create all the sysfs attribute files for a newly registered BMC.
   Uses the classic cascading goto-unwind pattern: each failure label
   removes every file created before it, in reverse order.  The
   aux-firmware and GUID files are created only when the BMC reported
   that data. */
1986 static int create_files(struct bmc_device *bmc)
1990 err = device_create_file(&bmc->dev->dev,
1991 &bmc->device_id_attr);
1993 err = device_create_file(&bmc->dev->dev,
1994 &bmc->provides_dev_sdrs_attr);
1995 if (err) goto out_devid;
1996 err = device_create_file(&bmc->dev->dev,
1997 &bmc->revision_attr);
1998 if (err) goto out_sdrs;
1999 err = device_create_file(&bmc->dev->dev,
2000 &bmc->firmware_rev_attr);
2001 if (err) goto out_rev;
2002 err = device_create_file(&bmc->dev->dev,
2003 &bmc->version_attr);
2004 if (err) goto out_firm;
2005 err = device_create_file(&bmc->dev->dev,
2006 &bmc->add_dev_support_attr);
2007 if (err) goto out_version;
2008 err = device_create_file(&bmc->dev->dev,
2009 &bmc->manufacturer_id_attr);
2010 if (err) goto out_add_dev;
2011 err = device_create_file(&bmc->dev->dev,
2012 &bmc->product_id_attr);
2013 if (err) goto out_manu;
2014 if (bmc->id.aux_firmware_revision_set) {
2015 err = device_create_file(&bmc->dev->dev,
2016 &bmc->aux_firmware_rev_attr);
2017 if (err) goto out_prod_id;
2019 if (bmc->guid_set) {
2020 err = device_create_file(&bmc->dev->dev,
2022 if (err) goto out_aux_firm;
/* Error unwind: remove files in reverse creation order. */
2028 if (bmc->id.aux_firmware_revision_set)
2029 device_remove_file(&bmc->dev->dev,
2030 &bmc->aux_firmware_rev_attr);
2032 device_remove_file(&bmc->dev->dev,
2033 &bmc->product_id_attr);
2035 device_remove_file(&bmc->dev->dev,
2036 &bmc->manufacturer_id_attr);
2038 device_remove_file(&bmc->dev->dev,
2039 &bmc->add_dev_support_attr);
2041 device_remove_file(&bmc->dev->dev,
2042 &bmc->version_attr);
2044 device_remove_file(&bmc->dev->dev,
2045 &bmc->firmware_rev_attr);
2047 device_remove_file(&bmc->dev->dev,
2048 &bmc->revision_attr);
2050 device_remove_file(&bmc->dev->dev,
2051 &bmc->provides_dev_sdrs_attr);
2053 device_remove_file(&bmc->dev->dev,
2054 &bmc->device_id_attr);
/* Register the BMC behind @intf with the driver model.  If a
   bmc_device matching this BMC (by GUID, else by product/device id)
   already exists, reuse it and just take a reference; otherwise
   allocate and register a new platform device, populate its sysfs
   attributes, and create "bmc" / "ipmi<N>" cross-links between the
   system-interface device and the bmc device.  NOTE(review): several
   error-path lines are elided in this extract. */
2059 static int ipmi_bmc_register(ipmi_smi_t intf)
2062 struct bmc_device *bmc = intf->bmc;
2063 struct bmc_device *old_bmc;
2067 mutex_lock(&ipmidriver_mutex);
2070 * Try to find if there is an bmc_device struct
2071 * representing the interfaced BMC already
2074 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2076 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2081 * If there is already an bmc_device, free the new one,
2082 * otherwise register the new BMC device
2086 intf->bmc = old_bmc;
2089 kref_get(&bmc->refcount);
2090 mutex_unlock(&ipmidriver_mutex);
2093 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2094 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2095 bmc->id.manufacturer_id,
2099 bmc->dev = platform_device_alloc("ipmi_bmc",
2104 " Unable to allocate platform device\n");
2107 bmc->dev->dev.driver = &ipmidriver;
2108 dev_set_drvdata(&bmc->dev->dev, bmc);
2109 kref_init(&bmc->refcount);
2111 rv = platform_device_register(bmc->dev);
2112 mutex_unlock(&ipmidriver_mutex);
2116 " Unable to register bmc device: %d\n",
2118 /* Don't go to out_err, you can only do that if
2119 the device is registered already. */
/* Fill in each sysfs attribute: name, owner, read-only mode, and
   its show() callback. */
2123 bmc->device_id_attr.attr.name = "device_id";
2124 bmc->device_id_attr.attr.owner = THIS_MODULE;
2125 bmc->device_id_attr.attr.mode = S_IRUGO;
2126 bmc->device_id_attr.show = device_id_show;
2128 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2129 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2130 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2131 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2133 bmc->revision_attr.attr.name = "revision";
2134 bmc->revision_attr.attr.owner = THIS_MODULE;
2135 bmc->revision_attr.attr.mode = S_IRUGO;
2136 bmc->revision_attr.show = revision_show;
2138 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2139 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2140 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2141 bmc->firmware_rev_attr.show = firmware_rev_show;
2143 bmc->version_attr.attr.name = "ipmi_version";
2144 bmc->version_attr.attr.owner = THIS_MODULE;
2145 bmc->version_attr.attr.mode = S_IRUGO;
2146 bmc->version_attr.show = ipmi_version_show;
2148 bmc->add_dev_support_attr.attr.name
2149 = "additional_device_support";
2150 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2151 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2152 bmc->add_dev_support_attr.show = add_dev_support_show;
2154 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2155 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2156 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2157 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2159 bmc->product_id_attr.attr.name = "product_id";
2160 bmc->product_id_attr.attr.owner = THIS_MODULE;
2161 bmc->product_id_attr.attr.mode = S_IRUGO;
2162 bmc->product_id_attr.show = product_id_show;
2164 bmc->guid_attr.attr.name = "guid";
2165 bmc->guid_attr.attr.owner = THIS_MODULE;
2166 bmc->guid_attr.attr.mode = S_IRUGO;
2167 bmc->guid_attr.show = guid_show;
2169 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2170 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2171 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2172 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2174 rv = create_files(bmc);
2176 mutex_lock(&ipmidriver_mutex);
2177 platform_device_unregister(bmc->dev);
2178 mutex_unlock(&ipmidriver_mutex);
2184 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2185 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2186 bmc->id.manufacturer_id,
2192 * create symlink from system interface device to bmc device
2195 rv = sysfs_create_link(&intf->si_dev->kobj,
2196 &bmc->dev->dev.kobj, "bmc");
2199 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
/* Size the "ipmi<N>" link name with a length-only snprintf. */
2204 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2205 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2206 if (!intf->my_dev_name) {
2209 "ipmi_msghandler: allocate link from BMC: %d\n",
2213 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2215 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2218 kfree(intf->my_dev_name);
2219 intf->my_dev_name = NULL;
2222 " Unable to create symlink to bmc: %d\n",
2230 ipmi_bmc_unregister(intf);
/* Issue a Get Device GUID command to the BMC over the system
   interface; the reply is routed to guid_handler() via the interface's
   null_user_handler. */
2235 send_guid_cmd(ipmi_smi_t intf, int chan)
2237 struct kernel_ipmi_msg msg;
2238 struct ipmi_system_interface_addr si;
2240 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2241 si.channel = IPMI_BMC_CHANNEL;
2244 msg.netfn = IPMI_NETFN_APP_REQUEST;
2245 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2248 return i_ipmi_request(NULL,
2250 (struct ipmi_addr *) &si,
2257 intf->channels[0].address,
2258 intf->channels[0].lun,
/* null_user_handler for the Get Device GUID response: validates the
   reply, stores the 16-byte GUID (or clears guid_set on error / short
   reply), then wakes the waiter in get_guid(). */
2263 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2265 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2266 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2267 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2271 if (msg->msg.data[0] != 0) {
2272 /* Error from getting the GUID, the BMC doesn't have one. */
2273 intf->bmc->guid_set = 0;
/* Need completion code + 16 GUID bytes = 17. */
2277 if (msg->msg.data_len < 17) {
2278 intf->bmc->guid_set = 0;
2279 printk(KERN_WARNING PFX
2280 "guid_handler: The GUID response from the BMC was too"
2281 " short, it was %d but should have been 17. Assuming"
2282 " GUID is not available.\n",
2287 memcpy(intf->bmc->guid, msg->msg.data, 16);
2288 intf->bmc->guid_set = 1;
2290 wake_up(&intf->waitq);
/* Synchronously fetch the BMC GUID: guid_set is parked at 2
   ("pending") so the wait below blocks until guid_handler() sets it
   to 0 or 1. */
2294 get_guid(ipmi_smi_t intf)
2298 intf->bmc->guid_set = 0x2;
2299 intf->null_user_handler = guid_handler;
2300 rv = send_guid_cmd(intf, 0);
2302 /* Send failed, no GUID available. */
2303 intf->bmc->guid_set = 0;
2304 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2305 intf->null_user_handler = NULL;
/* Issue a Get Channel Info command for channel @chan over the system
   interface; replies are handled by channel_handler(). */
2309 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2311 struct kernel_ipmi_msg msg;
2312 unsigned char data[1];
2313 struct ipmi_system_interface_addr si;
2315 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2316 si.channel = IPMI_BMC_CHANNEL;
2319 msg.netfn = IPMI_NETFN_APP_REQUEST;
2320 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2324 return i_ipmi_request(NULL,
2326 (struct ipmi_addr *) &si,
2333 intf->channels[0].address,
2334 intf->channels[0].lun,
/* null_user_handler driving the channel scan: records medium/protocol
   for the current channel from a Get Channel Info response, then
   requests the next channel.  Finishing (or any unrecoverable error)
   sets curr_channel to IPMI_MAX_CHANNELS and wakes the waiter in
   ipmi_register_smi(). */
2339 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2344 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2345 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2346 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2348 /* It's the one we want */
2349 if (msg->msg.data[0] != 0) {
2350 /* Got an error from the channel, just go on. */
2352 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2353 /* If the MC does not support this
2354 command, that is legal. We just
2355 assume it has one IPMB at channel
2357 intf->channels[0].medium
2358 = IPMI_CHANNEL_MEDIUM_IPMB;
2359 intf->channels[0].protocol
2360 = IPMI_CHANNEL_PROTOCOL_IPMB;
/* Mark the scan finished and wake ipmi_register_smi(). */
2363 intf->curr_channel = IPMI_MAX_CHANNELS;
2364 wake_up(&intf->waitq);
2369 if (msg->msg.data_len < 4) {
2370 /* Message not big enough, just go on. */
2373 chan = intf->curr_channel;
2374 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2375 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2378 intf->curr_channel++;
2379 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2380 wake_up(&intf->waitq);
2382 rv = send_channel_info_cmd(intf, intf->curr_channel);
2385 /* Got an error somehow, just give up. */
2386 intf->curr_channel = IPMI_MAX_CHANNELS;
2387 wake_up(&intf->waitq);
2389 printk(KERN_WARNING PFX
2390 "Error sending channel information: %d\n",
/* Register a new low-level (SMI) interface with the message handler:
   allocate and zero the interface structure, initialize its locks,
   lists and default channel addressing, claim a slot in
   ipmi_interfaces[], start the lower layer, scan channels (IPMI >=
   1.5) or assume one IPMB channel, create /proc entries, register the
   BMC, and finally publish the slot and notify SMI watchers.
   NOTE(review): error-handling lines are elided in this extract. */
2398 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2400 struct ipmi_device_id *device_id,
2401 struct device *si_dev,
2402 unsigned char slave_addr)
2407 unsigned long flags;
2411 version_major = ipmi_version_major(device_id);
2412 version_minor = ipmi_version_minor(device_id);
2414 /* Make sure the driver is actually initialized, this handles
2415 problems with initialization order. */
2417 rv = ipmi_init_msghandler();
2420 /* The init code doesn't return an error if it was turned
2421 off, but it won't initialize. Check that. */
2426 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2429 memset(intf, 0, sizeof(*intf));
2430 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2435 intf->intf_num = -1;
2436 kref_init(&intf->refcount);
2437 intf->bmc->id = *device_id;
2438 intf->si_dev = si_dev;
/* Default every channel to the BMC slave address and LUN 2 until
   the channel scan below fills in real data. */
2439 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2440 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2441 intf->channels[j].lun = 2;
2443 if (slave_addr != 0)
2444 intf->channels[0].address = slave_addr;
2445 INIT_LIST_HEAD(&intf->users);
2446 intf->handlers = handlers;
2447 intf->send_info = send_info;
2448 spin_lock_init(&intf->seq_lock);
2449 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2450 intf->seq_table[j].inuse = 0;
2451 intf->seq_table[j].seqid = 0;
2454 #ifdef CONFIG_PROC_FS
2455 spin_lock_init(&intf->proc_entry_lock);
2457 spin_lock_init(&intf->waiting_msgs_lock);
2458 INIT_LIST_HEAD(&intf->waiting_msgs);
2459 spin_lock_init(&intf->events_lock);
2460 INIT_LIST_HEAD(&intf->waiting_events);
2461 intf->waiting_events_count = 0;
2462 mutex_init(&intf->cmd_rcvrs_mutex);
2463 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2464 init_waitqueue_head(&intf->waitq);
2466 spin_lock_init(&intf->counter_lock);
2467 intf->proc_dir = NULL;
/* Grab a free slot; mark it reserved so no one else takes it while
   we finish initialization outside the lock. */
2470 spin_lock_irqsave(&interfaces_lock, flags);
2471 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2472 if (ipmi_interfaces[i] == NULL) {
2474 /* Reserve the entry till we are done. */
2475 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2480 spin_unlock_irqrestore(&interfaces_lock, flags);
2484 rv = handlers->start_processing(send_info, intf);
2490 if ((version_major > 1)
2491 || ((version_major == 1) && (version_minor >= 5)))
2493 /* Start scanning the channels to see what is
2495 intf->null_user_handler = channel_handler;
2496 intf->curr_channel = 0;
2497 rv = send_channel_info_cmd(intf, 0);
2501 /* Wait for the channel info to be read. */
2502 wait_event(intf->waitq,
2503 intf->curr_channel >= IPMI_MAX_CHANNELS);
2504 intf->null_user_handler = NULL;
2506 /* Assume a single IPMB channel at zero. */
2507 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2508 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2512 rv = add_proc_entries(intf, i);
2514 rv = ipmi_bmc_register(intf);
/* Failure unwind: drop proc entries, our reference, and release
   the reserved slot if we claimed one. */
2519 remove_proc_entries(intf);
2520 kref_put(&intf->refcount, intf_free);
2521 if (i < MAX_IPMI_INTERFACES) {
2522 spin_lock_irqsave(&interfaces_lock, flags);
2523 ipmi_interfaces[i] = NULL;
2524 spin_unlock_irqrestore(&interfaces_lock, flags);
/* Success: publish the interface and tell the watchers. */
2527 spin_lock_irqsave(&interfaces_lock, flags);
2528 ipmi_interfaces[i] = intf;
2529 spin_unlock_irqrestore(&interfaces_lock, flags);
2530 call_smi_watchers(i, intf->si_dev);
/* Unregister an SMI interface: unregister its BMC, reserve its
   ipmi_interfaces[] slot (so the number isn't reused mid-teardown),
   remove /proc entries, notify watchers of the removal, then free the
   slot and drop the final reference. */
2536 int ipmi_unregister_smi(ipmi_smi_t intf)
2539 struct ipmi_smi_watcher *w;
2540 unsigned long flags;
2542 ipmi_bmc_unregister(intf);
2544 spin_lock_irqsave(&interfaces_lock, flags);
2545 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2546 if (ipmi_interfaces[i] == intf) {
2547 /* Set the interface number reserved until we
2549 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2550 intf->intf_num = -1;
2554 spin_unlock_irqrestore(&interfaces_lock,flags);
2556 if (i == MAX_IPMI_INTERFACES)
2559 remove_proc_entries(intf);
2561 /* Call all the watcher interfaces to tell them that
2562 an interface is gone. */
2563 down_read(&smi_watchers_sem);
2564 list_for_each_entry(w, &smi_watchers, link)
2566 up_read(&smi_watchers_sem);
2568 /* Allow the entry to be reused now. */
2569 spin_lock_irqsave(&interfaces_lock, flags);
2570 ipmi_interfaces[i] = NULL;
2571 spin_unlock_irqrestore(&interfaces_lock,flags);
2573 kref_put(&intf->refcount, intf_free);
/* Handle a response that arrived from a remote IPMB entity: rebuild
   the originator's IPMB address from the raw bytes, match it to a
   pending request via the sequence table, copy the payload into the
   saved recv_msg, and deliver it to the requesting user. */
2577 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2578 struct ipmi_smi_msg *msg)
2580 struct ipmi_ipmb_addr ipmb_addr;
2581 struct ipmi_recv_msg *recv_msg;
2582 unsigned long flags;
2585 /* This is 11, not 10, because the response must contain a
2586 * completion code. */
2587 if (msg->rsp_size < 11) {
2588 /* Message not big enough, just ignore it. */
2589 spin_lock_irqsave(&intf->counter_lock, flags);
2590 intf->invalid_ipmb_responses++;
2591 spin_unlock_irqrestore(&intf->counter_lock, flags);
2595 if (msg->rsp[2] != 0) {
2596 /* An error getting the response, just ignore it. */
2600 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2601 ipmb_addr.slave_addr = msg->rsp[6];
2602 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2603 ipmb_addr.lun = msg->rsp[7] & 3;
2605 /* It's a response from a remote entity. Look up the sequence
2606 number and handle the response. */
2607 if (intf_find_seq(intf,
/* Netfn is compared with its response bit (bit 0) masked off. */
2611 (msg->rsp[4] >> 2) & (~1),
2612 (struct ipmi_addr *) &(ipmb_addr),
2615 /* We were unable to find the sequence number,
2616 so just nuke the message. */
2617 spin_lock_irqsave(&intf->counter_lock, flags);
2618 intf->unhandled_ipmb_responses++;
2619 spin_unlock_irqrestore(&intf->counter_lock, flags);
2623 memcpy(recv_msg->msg_data,
2626 /* THe other fields matched, so no need to set them, except
2627 for netfn, which needs to be the response that was
2628 returned, not the request value. */
2629 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2630 recv_msg->msg.data = recv_msg->msg_data;
2631 recv_msg->msg.data_len = msg->rsp_size - 10;
2632 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2633 spin_lock_irqsave(&intf->counter_lock, flags);
2634 intf->handled_ipmb_responses++;
2635 spin_unlock_irqrestore(&intf->counter_lock, flags);
2636 deliver_response(recv_msg);
/* Handle a command that arrived from a remote IPMB entity.  Look up a
   registered receiver for the (netfn, cmd, channel) triple; if none
   exists, synthesize and send an "invalid command" completion-code
   response back over IPMB; otherwise build a recv_msg carrying the
   sender's address and payload and deliver it to the registered
   user.  Returns -1 when the incoming buffer was consumed for the
   error reply (so the caller must not free/requeue it). */
2641 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2642 struct ipmi_smi_msg *msg)
2644 struct cmd_rcvr *rcvr;
2646 unsigned char netfn;
2649 ipmi_user_t user = NULL;
2650 struct ipmi_ipmb_addr *ipmb_addr;
2651 struct ipmi_recv_msg *recv_msg;
2652 unsigned long flags;
2654 if (msg->rsp_size < 10) {
2655 /* Message not big enough, just ignore it. */
2656 spin_lock_irqsave(&intf->counter_lock, flags);
2657 intf->invalid_commands++;
2658 spin_unlock_irqrestore(&intf->counter_lock, flags);
2662 if (msg->rsp[2] != 0) {
2663 /* An error getting the response, just ignore it. */
2667 netfn = msg->rsp[4] >> 2;
2669 chan = msg->rsp[3] & 0xf;
2672 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference on the user for the duration of delivery. */
2675 kref_get(&user->refcount);
2681 /* We didn't find a user, deliver an error response. */
2682 spin_lock_irqsave(&intf->counter_lock, flags);
2683 intf->unhandled_commands++;
2684 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build a Send Message request wrapping an IPMB response whose
   completion code is "invalid command". */
2686 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2687 msg->data[1] = IPMI_SEND_MSG_CMD;
2688 msg->data[2] = msg->rsp[3];
2689 msg->data[3] = msg->rsp[6];
2690 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2691 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2692 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2694 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2695 msg->data[8] = msg->rsp[8]; /* cmd */
2696 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2697 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2698 msg->data_size = 11;
2703 printk("Invalid command:");
2704 for (m = 0; m < msg->data_size; m++)
2705 printk(" %2.2x", msg->data[m]);
2709 intf->handlers->sender(intf->send_info, msg, 0);
2711 rv = -1; /* We used the message, so return the value that
2712 causes it to not be freed or queued. */
2714 /* Deliver the message to the user. */
2715 spin_lock_irqsave(&intf->counter_lock, flags);
2716 intf->handled_commands++;
2717 spin_unlock_irqrestore(&intf->counter_lock, flags);
2719 recv_msg = ipmi_alloc_recv_msg();
2721 /* We couldn't allocate memory for the
2722 message, so requeue it for handling
2725 kref_put(&user->refcount, free_user);
2727 /* Extract the source address from the data. */
2728 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2729 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2730 ipmb_addr->slave_addr = msg->rsp[6];
2731 ipmb_addr->lun = msg->rsp[7] & 3;
2732 ipmb_addr->channel = msg->rsp[3] & 0xf;
2734 /* Extract the rest of the message information
2735 from the IPMB header.*/
2736 recv_msg->user = user;
2737 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2738 recv_msg->msgid = msg->rsp[7] >> 2;
2739 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2740 recv_msg->msg.cmd = msg->rsp[8];
2741 recv_msg->msg.data = recv_msg->msg_data;
2743 /* We chop off 10, not 9 bytes because the checksum
2744 at the end also needs to be removed. */
2745 recv_msg->msg.data_len = msg->rsp_size - 10;
2746 memcpy(recv_msg->msg_data,
2748 msg->rsp_size - 10);
2749 deliver_response(recv_msg);
/*
 * Handle a response that arrived over a LAN channel to a command we
 * previously sent.  Reconstructs the LAN source address, matches the
 * response to its pending sequence-table entry, and delivers it to the
 * originating user.
 * NOTE(review): this listing is elided; return statements and some
 * branch lines are not visible here.
 */
2756 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2757 struct ipmi_smi_msg *msg)
2759 struct ipmi_lan_addr lan_addr;
2760 struct ipmi_recv_msg *recv_msg;
2761 unsigned long flags;
2764 /* This is 13, not 12, because the response must contain a
2765 * completion code. */
2766 if (msg->rsp_size < 13) {
2767 /* Message not big enough, just ignore it. */
2768 spin_lock_irqsave(&intf->counter_lock, flags);
2769 intf->invalid_lan_responses++;
2770 spin_unlock_irqrestore(&intf->counter_lock, flags);
2774 if (msg->rsp[2] != 0) {
2775 /* An error getting the response, just ignore it. */
/* Rebuild the remote LAN address from the Get Message data bytes. */
2779 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2780 lan_addr.session_handle = msg->rsp[4];
2781 lan_addr.remote_SWID = msg->rsp[8];
2782 lan_addr.local_SWID = msg->rsp[5];
2783 lan_addr.channel = msg->rsp[3] & 0x0f;
2784 lan_addr.privilege = msg->rsp[3] >> 4;
2785 lan_addr.lun = msg->rsp[9] & 3;
2787 /* It's a response from a remote entity. Look up the sequence
2788 number and handle the response. */
2789 if (intf_find_seq(intf,
2793 (msg->rsp[6] >> 2) & (~1),
2794 (struct ipmi_addr *) &(lan_addr),
2797 /* We were unable to find the sequence number,
2798 so just nuke the message. */
2799 spin_lock_irqsave(&intf->counter_lock, flags);
2800 intf->unhandled_lan_responses++;
2801 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* NOTE(review): the copy below is rsp_size - 11 bytes while
   data_len is set to rsp_size - 12 — the trailing checksum byte
   is copied into the buffer but excluded from the reported
   length. */
2805 memcpy(recv_msg->msg_data,
2807 msg->rsp_size - 11);
2808 /* The other fields matched, so no need to set them, except
2809 for netfn, which needs to be the response that was
2810 returned, not the request value. */
2811 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2812 recv_msg->msg.data = recv_msg->msg_data;
2813 recv_msg->msg.data_len = msg->rsp_size - 12;
2814 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2815 spin_lock_irqsave(&intf->counter_lock, flags);
2816 intf->handled_lan_responses++;
2817 spin_unlock_irqrestore(&intf->counter_lock, flags);
2818 deliver_response(recv_msg);
/*
 * Handle an incoming command received over a LAN channel.  Looks up a
 * registered command receiver for the (netfn, cmd, channel) tuple; if
 * none exists the message is simply dropped (unlike the IPMB path, no
 * error response is generated), otherwise the command is copied into a
 * recv_msg and delivered to the registered user.
 * NOTE(review): this listing is elided; return statements and some
 * branch/brace lines are not visible here.
 */
2823 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2824 struct ipmi_smi_msg *msg)
2826 struct cmd_rcvr *rcvr;
2828 unsigned char netfn;
2831 ipmi_user_t user = NULL;
2832 struct ipmi_lan_addr *lan_addr;
2833 struct ipmi_recv_msg *recv_msg;
2834 unsigned long flags;
2836 if (msg->rsp_size < 12) {
2837 /* Message not big enough, just ignore it. */
2838 spin_lock_irqsave(&intf->counter_lock, flags);
2839 intf->invalid_commands++;
2840 spin_unlock_irqrestore(&intf->counter_lock, flags);
2844 if (msg->rsp[2] != 0) {
2845 /* An error getting the response, just ignore it. */
/* Decode netfn and channel from the Get Message response bytes. */
2849 netfn = msg->rsp[6] >> 2;
2851 chan = msg->rsp[3] & 0xf;
2854 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference on the user while we deliver to it. */
2857 kref_get(&user->refcount);
2863 /* We didn't find a user, just give up. */
2864 spin_lock_irqsave(&intf->counter_lock, flags);
2865 intf->unhandled_commands++;
2866 spin_unlock_irqrestore(&intf->counter_lock, flags);
2868 rv = 0; /* Don't do anything with these messages, just
2869 allow them to be freed. */
2871 /* Deliver the message to the user. */
2872 spin_lock_irqsave(&intf->counter_lock, flags);
2873 intf->handled_commands++;
2874 spin_unlock_irqrestore(&intf->counter_lock, flags);
2876 recv_msg = ipmi_alloc_recv_msg();
2878 /* We couldn't allocate memory for the
2879 message, so requeue it for handling
2882 kref_put(&user->refcount, free_user);
2884 /* Extract the source address from the data. */
2885 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2886 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2887 lan_addr->session_handle = msg->rsp[4];
2888 lan_addr->remote_SWID = msg->rsp[8];
2889 lan_addr->local_SWID = msg->rsp[5];
2890 lan_addr->lun = msg->rsp[9] & 3;
2891 lan_addr->channel = msg->rsp[3] & 0xf;
2892 lan_addr->privilege = msg->rsp[3] >> 4;
2894 /* Extract the rest of the message information
2895 from the IPMB header.*/
2896 recv_msg->user = user;
2897 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2898 recv_msg->msgid = msg->rsp[9] >> 2;
2899 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2900 recv_msg->msg.cmd = msg->rsp[10];
2901 recv_msg->msg.data = recv_msg->msg_data;
2903 /* We chop off 12, not 11 bytes because the checksum
2904 at the end also needs to be removed. */
2905 recv_msg->msg.data_len = msg->rsp_size - 12;
2906 memcpy(recv_msg->msg_data,
2908 msg->rsp_size - 12);
2909 deliver_response(recv_msg);
/*
 * Fill a recv_msg with an asynchronous event read from the BMC's event
 * message buffer.  The event is addressed as coming from the BMC on the
 * system interface channel.  The copy starts at rsp[3], skipping the
 * netfn/lun, cmd, and completion-code bytes.
 */
2916 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2917 struct ipmi_smi_msg *msg)
2919 struct ipmi_system_interface_addr *smi_addr;
2921 recv_msg->msgid = 0;
2922 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2923 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2924 smi_addr->channel = IPMI_BMC_CHANNEL;
2925 smi_addr->lun = msg->rsp[0] & 3;
2926 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2927 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2928 recv_msg->msg.cmd = msg->rsp[1];
2929 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2930 recv_msg->msg.data = recv_msg->msg_data;
2931 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle the response to a Read Event Message Buffer command, i.e. an
 * asynchronous event from the BMC.  One copy of the event is allocated
 * per user that has events enabled; if allocation fails partway, all
 * copies built so far are freed and the message is requeued.  If no
 * user wants events, the event is parked on intf->waiting_events (up to
 * MAX_EVENTS_IN_QUEUE) for a future consumer.
 * NOTE(review): this listing is elided; return statements, the
 * events_received counter update (under counter_lock), and some
 * branch/brace lines are not visible here.
 */
2934 static int handle_read_event_rsp(ipmi_smi_t intf,
2935 struct ipmi_smi_msg *msg)
2937 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2938 struct list_head msgs;
2941 int deliver_count = 0;
2942 unsigned long flags;
2944 if (msg->rsp_size < 19) {
2945 /* Message is too small to be an IPMB event. */
2946 spin_lock_irqsave(&intf->counter_lock, flags);
2947 intf->invalid_events++;
2948 spin_unlock_irqrestore(&intf->counter_lock, flags);
2952 if (msg->rsp[2] != 0) {
2953 /* An error getting the event, just ignore it. */
2957 INIT_LIST_HEAD(&msgs);
/* events_lock serializes event delivery and the waiting queue. */
2959 spin_lock_irqsave(&intf->events_lock, flags);
2961 spin_lock(&intf->counter_lock);
2963 spin_unlock(&intf->counter_lock);
2965 /* Allocate and fill in one message for every user that is getting
2968 list_for_each_entry_rcu(user, &intf->users, link) {
2969 if (!user->gets_events)
2972 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed: undo the copies made so far. */
2975 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2977 list_del(&recv_msg->link);
2978 ipmi_free_recv_msg(recv_msg);
2980 /* We couldn't allocate memory for the
2981 message, so requeue it for handling
2989 copy_event_into_recv_msg(recv_msg, msg);
2990 recv_msg->user = user;
2991 kref_get(&user->refcount);
2992 list_add_tail(&(recv_msg->link), &msgs);
2996 if (deliver_count) {
2997 /* Now deliver all the messages. */
2998 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2999 list_del(&recv_msg->link);
3000 deliver_response(recv_msg);
3002 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3003 /* No one to receive the message, put it in queue if there's
3004 not already too many things in the queue. */
3005 recv_msg = ipmi_alloc_recv_msg();
3007 /* We couldn't allocate memory for the
3008 message, so requeue it for handling
3014 copy_event_into_recv_msg(recv_msg, msg);
3015 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3016 intf->waiting_events_count++;
3018 /* There's too many things in the queue, discard this
3020 printk(KERN_WARNING PFX "Event queue full, discarding an"
3021 " incoming event\n");
3025 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC to a command we sent on the
 * system interface.  The original recv_msg was stashed in
 * msg->user_data when the command was sent; fill it in from the
 * response bytes and deliver it to the owning user (or free it if the
 * user has gone away).
 * Fix: corrected the user-facing warning text ("vender" -> "vendor").
 * NOTE(review): this listing is elided; return statements and some
 * brace lines are not visible here.
 */
3030 static int handle_bmc_rsp(ipmi_smi_t intf,
3031 struct ipmi_smi_msg *msg)
3033 struct ipmi_recv_msg *recv_msg;
3034 unsigned long flags;
3035 struct ipmi_user *user;
3037 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3038 if (recv_msg == NULL)
3040 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3041 "could be because of a malformed message, or\n"
3042 "because of a hardware error. Contact your\n"
3043 "hardware vendor for assistance\n");
3047 user = recv_msg->user;
3048 /* Make sure the user still exists. */
3049 if (user && !user->valid) {
3050 /* The user for the message went away, so give up. */
3051 spin_lock_irqsave(&intf->counter_lock, flags);
3052 intf->unhandled_local_responses++;
3053 spin_unlock_irqrestore(&intf->counter_lock, flags);
3054 ipmi_free_recv_msg(recv_msg);
3056 struct ipmi_system_interface_addr *smi_addr;
3058 spin_lock_irqsave(&intf->counter_lock, flags);
3059 intf->handled_local_responses++;
3060 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Fill in the recv_msg from the raw response and deliver it. */
3061 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3062 recv_msg->msgid = msg->msgid;
3063 smi_addr = ((struct ipmi_system_interface_addr *)
3065 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3066 smi_addr->channel = IPMI_BMC_CHANNEL;
3067 smi_addr->lun = msg->rsp[0] & 3;
3068 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3069 recv_msg->msg.cmd = msg->rsp[1];
3070 memcpy(recv_msg->msg_data,
3073 recv_msg->msg.data = recv_msg->msg_data;
3074 recv_msg->msg.data_len = msg->rsp_size - 2;
3075 deliver_response(recv_msg);
/*
 * Top-level dispatch for a message coming up from the SMI layer.
 * Validates the response against the original request, then routes it:
 * Send Message responses, Get Message (receive queue) traffic split by
 * channel medium (IPMB vs. LAN, command vs. response), Read Event
 * Message Buffer responses, or plain local BMC responses.
 * Fix: corrected the user-facing warning text ("returned to small a
 * message" -> "returned too small a message").
 * NOTE(review): this listing is elided; some brace/return lines are
 * not visible here.
 */
3081 /* Handle a new message.  Return 1 if the message should be requeued,
3082 0 if the message should be freed, or -1 if the message should not
3083 be freed or requeued. */
3084 static int handle_new_recv_msg(ipmi_smi_t intf,
3085 struct ipmi_smi_msg *msg)
3093 for (m = 0; m < msg->rsp_size; m++)
3094 printk(" %2.2x", msg->rsp[m]);
3097 if (msg->rsp_size < 2) {
3098 /* Message is too small to be correct. */
3099 printk(KERN_WARNING PFX "BMC returned too small a message"
3100 " for netfn %x cmd %x, got %d bytes\n",
3101 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3103 /* Generate an error response for the message. */
3104 msg->rsp[0] = msg->data[0] | (1 << 2);
3105 msg->rsp[1] = msg->data[1];
3106 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3108 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3109 || (msg->rsp[1] != msg->data[1])) /* Command */
3111 /* The response is not even marginally correct. */
3112 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3113 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3114 (msg->data[0] >> 2) | 1, msg->data[1],
3115 msg->rsp[0] >> 2, msg->rsp[1]);
3117 /* Generate an error response for the message. */
3118 msg->rsp[0] = msg->data[0] | (1 << 2);
3119 msg->rsp[1] = msg->data[1];
3120 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3124 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3125 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3126 && (msg->user_data != NULL))
3128 /* It's a response to a response we sent.  For this we
3129 deliver a send message response to the user. */
3130 struct ipmi_recv_msg *recv_msg = msg->user_data;
3133 if (msg->rsp_size < 2)
3134 /* Message is too small to be correct. */
3137 chan = msg->data[2] & 0x0f;
3138 if (chan >= IPMI_MAX_CHANNELS)
3139 /* Invalid channel number */
3145 /* Make sure the user still exists. */
3146 if (!recv_msg->user || !recv_msg->user->valid)
3149 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3150 recv_msg->msg.data = recv_msg->msg_data;
3151 recv_msg->msg.data_len = 1;
3152 recv_msg->msg_data[0] = msg->rsp[2];
3153 deliver_response(recv_msg);
3154 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3155 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3157 /* It's from the receive queue. */
3158 chan = msg->rsp[3] & 0xf;
3159 if (chan >= IPMI_MAX_CHANNELS) {
3160 /* Invalid channel number */
/* Route by channel medium and by the request/response bit. */
3165 switch (intf->channels[chan].medium) {
3166 case IPMI_CHANNEL_MEDIUM_IPMB:
3167 if (msg->rsp[4] & 0x04) {
3168 /* It's a response, so find the
3169 requesting message and send it up. */
3170 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3172 /* It's a command to the SMS from some other
3173 entity.  Handle that. */
3174 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3178 case IPMI_CHANNEL_MEDIUM_8023LAN:
3179 case IPMI_CHANNEL_MEDIUM_ASYNC:
3180 if (msg->rsp[6] & 0x04) {
3181 /* It's a response, so find the
3182 requesting message and send it up. */
3183 requeue = handle_lan_get_msg_rsp(intf, msg);
3185 /* It's a command to the SMS from some other
3186 entity.  Handle that. */
3187 requeue = handle_lan_get_msg_cmd(intf, msg);
3192 /* We don't handle the channel type, so just
3193 * free the message. */
3197 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3198 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3200 /* It's an asyncronous event. */
3201 requeue = handle_read_event_rsp(intf, msg);
3203 /* It's a response from the local BMC. */
3204 requeue = handle_bmc_rsp(intf, msg);
/*
 * Entry point called by the SMI driver when a message arrives from the
 * hardware.  Responses to our own Send Message commands either start
 * the retry timer or report an error on the pending sequence; all other
 * messages are dispatched through handle_new_recv_msg(), with a waiting
 * list used to preserve ordering when a message cannot be handled yet.
 * NOTE(review): this listing is elided; some brace/return lines are
 * not visible here.
 */
3211 /* Handle a new message from the lower layer. */
3212 void ipmi_smi_msg_received(ipmi_smi_t intf,
3213 struct ipmi_smi_msg *msg)
3215 unsigned long flags;
3219 if ((msg->data_size >= 2)
3220 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3221 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3222 && (msg->user_data == NULL))
3224 /* This is the local response to a command send, start
3225 the timer for these.  The user_data will not be
3226 NULL if this is a response send, and we will let
3227 response sends just go through. */
3229 /* Check for errors, if we get certain errors (ones
3230 that mean basically we can try again later), we
3231 ignore them and start the timer.  Otherwise we
3232 report the error immediately. */
3233 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3234 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3235 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
3237 int chan = msg->rsp[3] & 0xf;
3239 /* Got an error sending the message, handle it. */
3240 spin_lock_irqsave(&intf->counter_lock, flags);
3241 if (chan >= IPMI_MAX_CHANNELS)
3242 ; /* This shouldn't happen */
3243 else if ((intf->channels[chan].medium
3244 == IPMI_CHANNEL_MEDIUM_8023LAN)
3245 || (intf->channels[chan].medium
3246 == IPMI_CHANNEL_MEDIUM_ASYNC))
3247 intf->sent_lan_command_errs++;
3249 intf->sent_ipmb_command_errs++;
3250 spin_unlock_irqrestore(&intf->counter_lock, flags);
3251 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3253 /* The message was sent, start the timer. */
3254 intf_start_seq_timer(intf, msg->msgid);
3257 ipmi_free_smi_msg(msg);
3261 /* To preserve message order, if the list is not empty, we
3262 tack this message onto the end of the list. */
3263 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3264 if (!list_empty(&intf->waiting_msgs)) {
3265 list_add_tail(&msg->link, &intf->waiting_msgs);
3266 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3269 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3271 rv = handle_new_recv_msg(intf, msg);
/* rv == 1 means requeue for later; rv == 0 means done, free it;
   rv == -1 means the handler consumed the message. */
3273 /* Could not handle the message now, just add it to a
3274 list to handle later. */
3275 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3276 list_add_tail(&msg->link, &intf->waiting_msgs);
3277 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3278 } else if (rv == 0) {
3279 ipmi_free_smi_msg(msg);
/*
 * Called by the lower layer on a watchdog pretimeout: notify every user
 * on this interface that registered an ipmi_watchdog_pretimeout
 * handler.
 * NOTE(review): elided listing — the RCU read-lock surrounding this
 * list walk is not visible here; confirm against the full source.
 */
3286 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3291 list_for_each_entry_rcu(user, &intf->users, link) {
3292 if (!user->handler->ipmi_watchdog_pretimeout)
3295 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * A pending message has exhausted its retries: convert the request in
 * place into a response carrying a timeout completion code and deliver
 * it to the waiting user.
 */
3301 handle_msg_timeout(struct ipmi_recv_msg *msg)
3303 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3304 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3305 msg->msg.netfn |= 1; /* Convert to a response. */
3306 msg->msg.data_len = 1;
3307 msg->msg.data = msg->msg_data;
3308 deliver_response(msg);
/*
 * Build a fresh SMI message from a recv_msg so it can be retransmitted,
 * encoding the sequence slot and sequence id into the msgid.
 * NOTE(review): elided listing — the NULL-check/return paths are not
 * visible here.
 */
3311 static struct ipmi_smi_msg *
3312 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3313 unsigned char seq, long seqid)
3315 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3317 /* If we can't allocate the message, then just return, we
3318 get 4 retries, so this should be ok. */
3321 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3322 smi_msg->data_size = recv_msg->msg.data_len;
3323 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3329 for (m = 0; m < smi_msg->data_size; m++)
3330 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period.  If the entry has
 * expired with no retries left, move its recv_msg onto the caller's
 * timeouts list (handled later via handle_msg_timeout); otherwise
 * rebuild and resend the message, bumping the retry counters.
 * Called with intf->seq_lock held (the *flags argument is that lock's
 * saved irq state; the lock is dropped around the sender call).
 */
3337 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3338 struct list_head *timeouts, long timeout_period,
3339 int slot, unsigned long *flags)
3341 struct ipmi_recv_msg *msg;
3346 ent->timeout -= timeout_period;
3347 if (ent->timeout > 0)
3350 if (ent->retries_left == 0) {
3351 /* The message has used all its retries. */
3353 msg = ent->recv_msg;
3354 list_add_tail(&msg->link, timeouts);
3355 spin_lock(&intf->counter_lock);
3357 intf->timed_out_ipmb_broadcasts++;
3358 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3359 intf->timed_out_lan_commands++;
3361 intf->timed_out_ipmb_commands++;
3362 spin_unlock(&intf->counter_lock);
3364 struct ipmi_smi_msg *smi_msg;
3365 /* More retries, send again. */
3367 /* Start with the max timer, set to normal
3368 timer after the message is sent. */
3369 ent->timeout = MAX_MSG_TIMEOUT;
3370 ent->retries_left--;
3371 spin_lock(&intf->counter_lock);
3372 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3373 intf->retransmitted_lan_commands++;
3375 intf->retransmitted_ipmb_commands++;
3376 spin_unlock(&intf->counter_lock);
3378 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop seq_lock while calling into the SMI driver; the sender
   may not be callable with it held. */
3383 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3384 /* Send the new message.  We send with a zero
3385 * priority.  It timed out, I doubt time is
3386 * that critical now, and high priority
3387 * messages are really only for messages to the
3388 * local MC, which don't get resent. */
3389 intf->handlers->sender(intf->send_info,
3391 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic work run from the IPMI timer: for every registered
 * interface, drain any deferred messages from waiting_msgs (stopping at
 * the first that can't be handled, to preserve order), then age all
 * sequence-table entries and deliver timeout responses for any that
 * expired.  A kref is held on each interface while it is processed.
 */
3395 static void ipmi_timeout_handler(long timeout_period)
3398 struct list_head timeouts;
3399 struct ipmi_recv_msg *msg, *msg2;
3400 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3401 unsigned long flags;
3404 INIT_LIST_HEAD(&timeouts);
3406 spin_lock(&interfaces_lock);
3407 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3408 intf = ipmi_interfaces[i];
3409 if (IPMI_INVALID_INTERFACE(intf))
/* Pin the interface; interfaces_lock is dropped while working. */
3411 kref_get(&intf->refcount);
3412 spin_unlock(&interfaces_lock);
3414 /* See if any waiting messages need to be processed. */
3415 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3416 list_for_each_entry_safe(smi_msg, smi_msg2,
3417 &intf->waiting_msgs, link) {
3418 if (!handle_new_recv_msg(intf, smi_msg)) {
3419 list_del(&smi_msg->link);
3420 ipmi_free_smi_msg(smi_msg);
3422 /* To preserve message order, quit if we
3423 can't handle a message. */
3427 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3429 /* Go through the seq table and find any messages that
3430 have timed out, putting them in the timeouts
3432 spin_lock_irqsave(&intf->seq_lock, flags);
3433 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
3434 check_msg_timeout(intf, &(intf->seq_table[j]),
3435 &timeouts, timeout_period, j,
3437 spin_unlock_irqrestore(&intf->seq_lock, flags);
3439 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3440 handle_msg_timeout(msg);
3442 kref_put(&intf->refcount, intf_free);
3443 spin_lock(&interfaces_lock);
3445 spin_unlock(&interfaces_lock);
/*
 * Ask every valid interface's SMI driver to poll the BMC for pending
 * events.  Run periodically from the timer (see IPMI_REQUEST_EV_TIME).
 */
3448 static void ipmi_request_event(void)
3453 spin_lock(&interfaces_lock);
3454 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3455 intf = ipmi_interfaces[i];
3456 if (IPMI_INVALID_INTERFACE(intf))
3459 intf->handlers->request_events(intf->send_info);
3461 spin_unlock(&interfaces_lock);
3464 static struct timer_list ipmi_timer;
3466 /* Call every ~100 ms. */
3467 #define IPMI_TIMEOUT_TIME 100
3469 /* How many jiffies does it take to get to the timeout time. */
3470 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3472 /* Request events from the queue every second (this is the number of
3473 IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
3474 future, IPMI will add a way to know immediately if an event is in
3475 the queue and this silliness can go away. */
3476 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* Set non-zero at module unload to make ipmi_timeout() stop
   rescheduling itself (see cleanup_ipmi). */
3478 static atomic_t stop_operation;
/* Countdown of timer ticks until the next event-poll request. */
3479 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback (~every 100 ms): periodically request events, run the
 * timeout handler, and re-arm the timer unless stop_operation is set.
 */
3481 static void ipmi_timeout(unsigned long data)
3483 if (atomic_read(&stop_operation))
3487 if (ticks_to_req_ev == 0) {
3488 ipmi_request_event();
3489 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3492 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3494 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* In-use counters used to detect message buffer leaks at module
   unload (checked in cleanup_ipmi). */
3498 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3499 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3501 /* FIXME - convert these to slabs. */
/* 'done' handler for SMI messages: drops the in-use count. */
3502 static void free_smi_msg(struct ipmi_smi_msg *msg)
3504 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an SMI message (GFP_ATOMIC, so callable from atomic
 * context) and set its 'done' callback and in-use accounting.
 * NOTE(review): elided listing — the NULL check guarding the
 * initialization is not visible here.
 */
3508 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3510 struct ipmi_smi_msg *rv;
3511 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3513 rv->done = free_smi_msg;
3514 rv->user_data = NULL;
3515 atomic_inc(&smi_msg_inuse_count);
/* 'done' handler for receive messages: drops the in-use count. */
3520 static void free_recv_msg(struct ipmi_recv_msg *msg)
3522 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a receive message (GFP_ATOMIC) and set its 'done' callback
 * and in-use accounting.
 * NOTE(review): elided listing — the NULL check guarding the
 * initialization is not visible here.
 */
3526 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3528 struct ipmi_recv_msg *rv;
3530 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3533 rv->done = free_recv_msg;
3534 atomic_inc(&recv_msg_inuse_count);
/* Free a receive message, dropping the reference it holds on its
   user. */
3539 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3542 kref_put(&msg->user->refcount, free_user);
3546 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op 'done' handlers for the statically allocated messages used
   at panic time (there is nothing to free). */
3548 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3552 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3556 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic: if the message is a successful
 * Get Event Receiver response on the system interface, record the
 * event receiver's address and LUN on the interface.
 */
3557 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3559 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3560 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3561 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3562 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3564 /* A get event receiver command, save it. */
3565 intf->event_receiver = msg->msg.data[1];
3566 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic: if the message is a successful
 * Get Device ID response on the system interface, record whether the
 * local MC is an SEL device and/or an event generator.
 */
3570 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3572 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3573 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3574 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3575 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3577 /* A get device id command, save if we are an event
3578 receiver or generator. */
3579 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3580 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * Called from the panic notifier: send an "OS Critical Stop" platform
 * event on every registered interface, and (when
 * CONFIG_IPMI_PANIC_STRING is set) chop the panic string into
 * 11-byte chunks and push them into the SEL as OEM events.  Uses
 * stack-allocated messages with dummy 'done' handlers and the
 * no-retry/no-wait i_ipmi_request() path since the system is dying.
 * NOTE(review): elided listing — several lines (including part of the
 * event data setup and loop/brace structure) are not visible here.
 */
3585 static void send_panic_events(char *str)
3587 struct kernel_ipmi_msg msg;
3589 unsigned char data[16];
3591 struct ipmi_system_interface_addr *si;
3592 struct ipmi_addr addr;
3593 struct ipmi_smi_msg smi_msg;
3594 struct ipmi_recv_msg recv_msg;
3596 si = (struct ipmi_system_interface_addr *) &addr;
3597 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3598 si->channel = IPMI_BMC_CHANNEL;
3601 /* Fill in an event telling that we have failed. */
3602 msg.netfn = 0x04; /* Sensor or Event. */
3603 msg.cmd = 2; /* Platform event command. */
3606 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3607 data[1] = 0x03; /* This is for IPMI 1.0. */
3608 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3609 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3610 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3612 /* Put a few breadcrumbs in.  Hopefully later we can add more things
3613 to make the panic events more useful. */
3620 smi_msg.done = dummy_smi_done_handler;
3621 recv_msg.done = dummy_recv_done_handler;
3623 /* For every registered interface, send the event. */
3624 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3625 intf = ipmi_interfaces[i];
3626 if (IPMI_INVALID_INTERFACE(intf))
3629 /* Send the event announcing the panic. */
3630 intf->handlers->set_run_to_completion(intf->send_info, 1);
3631 i_ipmi_request(NULL,
3640 intf->channels[0].address,
3641 intf->channels[0].lun,
3642 0, 1); /* Don't retry, and don't wait. */
3645 #ifdef CONFIG_IPMI_PANIC_STRING
3646 /* On every interface, dump a bunch of OEM event holding the
3651 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3653 struct ipmi_ipmb_addr *ipmb;
3656 intf = ipmi_interfaces[i];
3657 if (IPMI_INVALID_INTERFACE(intf))
3660 /* First job here is to figure out where to send the
3661 OEM events.  There's no way in IPMI to send OEM
3662 events using an event send command, so we have to
3663 find the SEL to put them in and stick them in
3666 /* Get capabilities from the get device id. */
3667 intf->local_sel_device = 0;
3668 intf->local_event_generator = 0;
3669 intf->event_receiver = 0;
3671 /* Request the device info from the local MC. */
3672 msg.netfn = IPMI_NETFN_APP_REQUEST;
3673 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3676 intf->null_user_handler = device_id_fetcher;
3677 i_ipmi_request(NULL,
3686 intf->channels[0].address,
3687 intf->channels[0].lun,
3688 0, 1); /* Don't retry, and don't wait. */
3690 if (intf->local_event_generator) {
3691 /* Request the event receiver from the local MC. */
3692 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3693 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3696 intf->null_user_handler = event_receiver_fetcher;
3697 i_ipmi_request(NULL,
3706 intf->channels[0].address,
3707 intf->channels[0].lun,
3708 0, 1); /* no retry, and no wait. */
3710 intf->null_user_handler = NULL;
3712 /* Validate the event receiver.  The low bit must not
3713 be 1 (it must be a valid IPMB address), it cannot
3714 be zero, and it must not be my address. */
3715 if (((intf->event_receiver & 1) == 0)
3716 && (intf->event_receiver != 0)
3717 && (intf->event_receiver != intf->channels[0].address))
3719 /* The event receiver is valid, send an IPMB
3721 ipmb = (struct ipmi_ipmb_addr *) &addr;
3722 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3723 ipmb->channel = 0; /* FIXME - is this right? */
3724 ipmb->lun = intf->event_receiver_lun;
3725 ipmb->slave_addr = intf->event_receiver;
3726 } else if (intf->local_sel_device) {
3727 /* The event receiver was not valid (or was
3728 me), but I am an SEL device, just dump it
3730 si = (struct ipmi_system_interface_addr *) &addr;
3731 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3732 si->channel = IPMI_BMC_CHANNEL;
3735 continue; /* No where to send the event. */
3738 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3739 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3745 int size = strlen(p);
3751 data[2] = 0xf0; /* OEM event without timestamp. */
3752 data[3] = intf->channels[0].address;
3753 data[4] = j++; /* sequence # */
3754 /* Always give 11 bytes, so strncpy will fill
3755 it with zeroes for me. */
/* strncpy's zero-padding is relied on here; the 11-byte
   chunk is deliberately not NUL-terminated as a C string. */
3756 strncpy(data+5, p, 11);
3759 i_ipmi_request(NULL,
3768 intf->channels[0].address,
3769 intf->channels[0].lun,
3770 0, 1); /* no retry, and no wait. */
3773 #endif /* CONFIG_IPMI_PANIC_STRING */
3775 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Guard so the panic path runs at most once. */
3777 static int has_panicked = 0;
/*
 * Panic-notifier callback: put every interface into run-to-completion
 * mode (polled, no interrupts) and, if configured, emit the panic
 * events.  'ptr' is the panic string passed by the notifier chain.
 * NOTE(review): elided listing — the early-exit checks and return are
 * not visible here.
 */
3779 static int panic_event(struct notifier_block *this,
3780 unsigned long event,
3790 /* For every registered interface, set it to run to completion. */
3791 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3792 intf = ipmi_interfaces[i];
3793 if (IPMI_INVALID_INTERFACE(intf))
3796 intf->handlers->set_run_to_completion(intf->send_info, 1);
3799 #ifdef CONFIG_IPMI_PANIC_EVENT
3800 send_panic_events(ptr);
3806 static struct notifier_block panic_block = {
3807 .notifier_call = panic_event,
3809 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time initialization of the message handler: register the driver,
 * clear the interface table, create /proc/ipmi, start the periodic
 * timer, and hook into the panic notifier chain.
 * NOTE(review): elided listing — the 'initialized' guard and return
 * paths are not visible here.
 */
3812 static int ipmi_init_msghandler(void)
3820 rv = driver_register(&ipmidriver);
3822 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3826 printk(KERN_INFO "ipmi message handler version "
3827 IPMI_DRIVER_VERSION "\n");
3829 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3830 ipmi_interfaces[i] = NULL;
3832 #ifdef CONFIG_PROC_FS
3833 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3834 if (!proc_ipmi_root) {
3835 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3839 proc_ipmi_root->owner = THIS_MODULE;
3840 #endif /* CONFIG_PROC_FS */
3842 setup_timer(&ipmi_timer, ipmi_timeout, 0);
3843 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3845 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point — just runs the common initialization. */
3852 static __init int ipmi_init_msghandler_mod(void)
3854 ipmi_init_msghandler();
/*
 * Module exit: unhook the panic notifier, stop the periodic timer
 * (stop_operation prevents it from re-arming, del_timer_sync waits
 * out any in-flight run), remove the proc dir, unregister the driver,
 * and warn about any leaked message buffers.
 */
3858 static __exit void cleanup_ipmi(void)
3865 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3867 /* This can't be called if any interfaces exist, so no worry about
3868 shutting down the interfaces. */
3870 /* Tell the timer to stop, then wait for it to stop.  This avoids
3871 problems with race conditions removing the timer here. */
3872 atomic_inc(&stop_operation);
3873 del_timer_sync(&ipmi_timer);
3875 #ifdef CONFIG_PROC_FS
3876 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3877 #endif /* CONFIG_PROC_FS */
3879 driver_unregister(&ipmidriver);
3883 /* Check for buffer leaks. */
3884 count = atomic_read(&smi_msg_inuse_count);
3886 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3888 count = atomic_read(&recv_msg_inuse_count);
3890 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3893 module_exit(cleanup_ipmi);
3895 module_init(ipmi_init_msghandler_mod);
3896 MODULE_LICENSE("GPL");
3897 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3898 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3899 MODULE_VERSION(IPMI_DRIVER_VERSION);
3901 EXPORT_SYMBOL(ipmi_create_user);
3902 EXPORT_SYMBOL(ipmi_destroy_user);
3903 EXPORT_SYMBOL(ipmi_get_version);
3904 EXPORT_SYMBOL(ipmi_request_settime);
3905 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3906 EXPORT_SYMBOL(ipmi_register_smi);
3907 EXPORT_SYMBOL(ipmi_unregister_smi);
3908 EXPORT_SYMBOL(ipmi_register_for_cmd);
3909 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3910 EXPORT_SYMBOL(ipmi_smi_msg_received);
3911 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3912 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3913 EXPORT_SYMBOL(ipmi_addr_length);
3914 EXPORT_SYMBOL(ipmi_validate_addr);
3915 EXPORT_SYMBOL(ipmi_set_gets_events);
3916 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3917 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3918 EXPORT_SYMBOL(ipmi_set_my_address);
3919 EXPORT_SYMBOL(ipmi_get_my_address);
3920 EXPORT_SYMBOL(ipmi_set_my_LUN);
3921 EXPORT_SYMBOL(ipmi_get_my_LUN);
3922 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3923 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3924 EXPORT_SYMBOL(ipmi_free_recv_msg);