drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <linux/sched.h>
45 #include <linux/seq_file.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi.h>
61 #include <linux/ipmi_smi.h>
62 #include <asm/io.h>
63 #include "ipmi_si_sm.h"
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67 #include <linux/pnp.h>
68 #include <linux/of_device.h>
69 #include <linux/of_platform.h>
70 #include <linux/of_address.h>
71 #include <linux/of_irq.h>
72
73 #ifdef CONFIG_PARISC
74 #include <asm/hardware.h>       /* for register_parisc_driver() stuff */
75 #include <asm/parisc-device.h>
76 #endif
77
78 #define PFX "ipmi_si: "
79
80 /* Measure times between events in the driver. */
81 #undef DEBUG_TIMING
82
83 /* Call every 10 ms. */
84 #define SI_TIMEOUT_TIME_USEC    10000
85 #define SI_USEC_PER_JIFFY       (1000000/HZ)
86 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
87  #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
88                                        short timeout */
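/*
 * Worked example (HZ values are illustrative, not taken from this
 * file): with HZ=1000, SI_USEC_PER_JIFFY is 1000 and SI_TIMEOUT_JIFFIES
 * is 10 jiffies (exactly 10 ms); with HZ=250 the integer division gives
 * 10000/4000 = 2 jiffies, so the long timeout rounds down to 8 ms.
 */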
89
90 enum si_intf_state {
91         SI_NORMAL,
92         SI_GETTING_FLAGS,
93         SI_GETTING_EVENTS,
94         SI_CLEARING_FLAGS,
95         SI_CLEARING_FLAGS_THEN_SET_IRQ,
96         SI_GETTING_MESSAGES,
97         SI_ENABLE_INTERRUPTS1,
98         SI_ENABLE_INTERRUPTS2,
99         SI_DISABLE_INTERRUPTS1,
100         SI_DISABLE_INTERRUPTS2
101         /* FIXME - add watchdog stuff. */
102 };
103
104 /* Some BT-specific defines we need here. */
105 #define IPMI_BT_INTMASK_REG             2
106 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
107 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
108
109 enum si_type {
110     SI_KCS, SI_SMIC, SI_BT
111 };
112 static char *si_to_str[] = { "kcs", "smic", "bt" };
113
114 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
115                                         "ACPI", "SMBIOS", "PCI",
116                                         "device-tree", "default" };
117
118 #define DEVICE_NAME "ipmi_si"
119
120 static struct platform_driver ipmi_driver;
121
122 /*
123  * Indexes into stats[] in smi_info below.
124  */
125 enum si_stat_indexes {
126         /*
127          * Number of times the driver requested a timer while an operation
128          * was in progress.
129          */
130         SI_STAT_short_timeouts = 0,
131
132         /*
133          * Number of times the driver requested a timer while nothing was in
134          * progress.
135          */
136         SI_STAT_long_timeouts,
137
138         /* Number of times the interface was idle while being polled. */
139         SI_STAT_idles,
140
141         /* Number of interrupts the driver handled. */
142         SI_STAT_interrupts,
143
144         /* Number of times the driver got an ATTN from the hardware. */
145         SI_STAT_attentions,
146
147         /* Number of times the driver requested flags from the hardware. */
148         SI_STAT_flag_fetches,
149
150         /* Number of times the hardware didn't follow the state machine. */
151         SI_STAT_hosed_count,
152
153         /* Number of completed messages. */
154         SI_STAT_complete_transactions,
155
156         /* Number of IPMI events received from the hardware. */
157         SI_STAT_events,
158
159         /* Number of watchdog pretimeouts. */
160         SI_STAT_watchdog_pretimeouts,
161
162         /* Number of asynchronous messages received. */
163         SI_STAT_incoming_messages,
164
165
166         /* This *must* remain last, add new values above this. */
167         SI_NUM_STATS
168 };
169
170 struct smi_info {
171         int                    intf_num;
172         ipmi_smi_t             intf;
173         struct si_sm_data      *si_sm;
174         struct si_sm_handlers  *handlers;
175         enum si_type           si_type;
176         spinlock_t             si_lock;
177         struct list_head       xmit_msgs;
178         struct list_head       hp_xmit_msgs;
179         struct ipmi_smi_msg    *curr_msg;
180         enum si_intf_state     si_state;
181
182         /*
183          * Used to handle the various types of I/O that can occur with
184          * IPMI
185          */
186         struct si_sm_io io;
187         int (*io_setup)(struct smi_info *info);
188         void (*io_cleanup)(struct smi_info *info);
189         int (*irq_setup)(struct smi_info *info);
190         void (*irq_cleanup)(struct smi_info *info);
191         unsigned int io_size;
192         enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
193         void (*addr_source_cleanup)(struct smi_info *info);
194         void *addr_source_data;
195
196         /*
197          * Per-OEM handler, called from handle_flags().  Returns 1
198          * when handle_flags() needs to be re-run or 0 indicating it
199          * set si_state itself.
200          */
201         int (*oem_data_avail_handler)(struct smi_info *smi_info);
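        /*
         * Illustrative note, not a handler defined in this file: a
         * handler that only cleared OEM0_DATA_AVAIL from msg_flags and
         * returned 1 would make handle_flags() loop and service any
         * remaining flags; returning 0 means the handler has already
         * chosen the next si_state itself.
         */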
202
203         /*
204          * Flags from the last GET_MSG_FLAGS command, used when an ATTN
205          * is set to hold the flags until we are done handling everything
206          * from the flags.
207          */
208 #define RECEIVE_MSG_AVAIL       0x01
209 #define EVENT_MSG_BUFFER_FULL   0x02
210 #define WDT_PRE_TIMEOUT_INT     0x08
211 #define OEM0_DATA_AVAIL     0x20
212 #define OEM1_DATA_AVAIL     0x40
213 #define OEM2_DATA_AVAIL     0x80
214 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
215                              OEM1_DATA_AVAIL | \
216                              OEM2_DATA_AVAIL)
217         unsigned char       msg_flags;
218
219         /* Does the BMC have an event buffer? */
220         char                has_event_buffer;
221
222         /*
223          * If set to true, this will request events the next time the
224          * state machine is idle.
225          */
226         atomic_t            req_events;
227
228         /*
229          * If true, run the state machine to completion on every send
230          * call.  Generally used after a panic to make sure stuff goes
231          * out.
232          */
233         int                 run_to_completion;
234
235         /* The I/O port of an SI interface. */
236         int                 port;
237
238         /*
239          * The space between start addresses of the two ports.  For
240          * instance, if the first port is 0xca2 and the spacing is 4, then
241          * the second port is 0xca6.
242          */
243         unsigned int        spacing;
244
245         /* zero if no irq; */
246         int                 irq;
247
248         /* The timer for this si. */
249         struct timer_list   si_timer;
250
251         /* The time (in jiffies) the last timeout occurred at. */
252         unsigned long       last_timeout_jiffies;
253
254         /* Used to gracefully stop the timer without race conditions. */
255         atomic_t            stop_operation;
256
257         /*
258          * The driver will disable interrupts when it gets into a
259          * situation where it cannot handle messages due to lack of
260          * memory.  Once that situation clears up, it will re-enable
261          * interrupts.
262          */
263         int interrupt_disabled;
264
265         /* From the get device id response... */
266         struct ipmi_device_id device_id;
267
268         /* Driver model stuff. */
269         struct device *dev;
270         struct platform_device *pdev;
271
272         /*
273          * True if we allocated the device, false if it came from
274          * someplace else (like PCI).
275          */
276         int dev_registered;
277
278         /* Slave address, could be reported from DMI. */
279         unsigned char slave_addr;
280
281         /* Counters and things for the proc filesystem. */
282         atomic_t stats[SI_NUM_STATS];
283
284         struct task_struct *thread;
285
286         struct list_head link;
287         union ipmi_smi_info_union addr_info;
288 };
289
290 #define smi_inc_stat(smi, stat) \
291         atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
292 #define smi_get_stat(smi, stat) \
293         ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
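/*
 * Expansion example for the token-pasting macros above:
 * smi_inc_stat(smi_info, attentions) becomes
 * atomic_inc(&smi_info->stats[SI_STAT_attentions]), so callers refer to
 * a counter by its short suffix rather than the full enum name.
 */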
294
295 #define SI_MAX_PARMS 4
296
297 static int force_kipmid[SI_MAX_PARMS];
298 static int num_force_kipmid;
299 #ifdef CONFIG_PCI
300 static int pci_registered;
301 #endif
302 #ifdef CONFIG_ACPI
303 static int pnp_registered;
304 #endif
305 #ifdef CONFIG_PARISC
306 static int parisc_registered;
307 #endif
308
309 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
310 static int num_max_busy_us;
311
312 static int unload_when_empty = 1;
313
314 static int add_smi(struct smi_info *smi);
315 static int try_smi_init(struct smi_info *smi);
316 static void cleanup_one_si(struct smi_info *to_clean);
317 static void cleanup_ipmi_si(void);
318
319 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
320 static int register_xaction_notifier(struct notifier_block *nb)
321 {
322         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
323 }
324
325 static void deliver_recv_msg(struct smi_info *smi_info,
326                              struct ipmi_smi_msg *msg)
327 {
328         /* Deliver the message to the upper layer. */
329         ipmi_smi_msg_received(smi_info->intf, msg);
330 }
331
332 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
333 {
334         struct ipmi_smi_msg *msg = smi_info->curr_msg;
335
336         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
337                 cCode = IPMI_ERR_UNSPECIFIED;
338         /* else use it as is */
339
340         /* Make it a response */
341         msg->rsp[0] = msg->data[0] | 4;
342         msg->rsp[1] = msg->data[1];
343         msg->rsp[2] = cCode;
344         msg->rsp_size = 3;
345
346         smi_info->curr_msg = NULL;
347         deliver_recv_msg(smi_info, msg);
348 }
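/*
 * Framing example for return_hosed_msg() (values shown for
 * illustration): an APP request queues data[0] = IPMI_NETFN_APP_REQUEST
 * << 2 = 0x18; OR-ing in 4 flips it to the response netfn, so rsp[0]
 * becomes 0x1c, rsp[1] echoes the command byte and rsp[2] carries the
 * completion code (clamped to IPMI_ERR_UNSPECIFIED when out of range).
 */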
349
350 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
351 {
352         int              rv;
353         struct list_head *entry = NULL;
354 #ifdef DEBUG_TIMING
355         struct timeval t;
356 #endif
357
358         /* Pick the high priority queue first. */
359         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
360                 entry = smi_info->hp_xmit_msgs.next;
361         } else if (!list_empty(&(smi_info->xmit_msgs))) {
362                 entry = smi_info->xmit_msgs.next;
363         }
364
365         if (!entry) {
366                 smi_info->curr_msg = NULL;
367                 rv = SI_SM_IDLE;
368         } else {
369                 int err;
370
371                 list_del(entry);
372                 smi_info->curr_msg = list_entry(entry,
373                                                 struct ipmi_smi_msg,
374                                                 link);
375 #ifdef DEBUG_TIMING
376                 do_gettimeofday(&t);
377                 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
378 #endif
379                 err = atomic_notifier_call_chain(&xaction_notifier_list,
380                                 0, smi_info);
381                 if (err & NOTIFY_STOP_MASK) {
382                         rv = SI_SM_CALL_WITHOUT_DELAY;
383                         goto out;
384                 }
385                 err = smi_info->handlers->start_transaction(
386                         smi_info->si_sm,
387                         smi_info->curr_msg->data,
388                         smi_info->curr_msg->data_size);
389                 if (err)
390                         return_hosed_msg(smi_info, err);
391
392                 rv = SI_SM_CALL_WITHOUT_DELAY;
393         }
394  out:
395         return rv;
396 }
397
398 static void start_enable_irq(struct smi_info *smi_info)
399 {
400         unsigned char msg[2];
401
402         /*
403          * If we are enabling interrupts, we have to tell the
404          * BMC to use them.
405          */
406         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
407         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
408
409         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
410         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
411 }
412
413 static void start_disable_irq(struct smi_info *smi_info)
414 {
415         unsigned char msg[2];
416
417         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
418         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
419
420         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
421         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
422 }
423
424 static void start_clear_flags(struct smi_info *smi_info)
425 {
426         unsigned char msg[3];
427
428         /* Make sure the watchdog pre-timeout flag is not set at startup. */
429         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
430         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
431         msg[2] = WDT_PRE_TIMEOUT_INT;
432
433         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
434         smi_info->si_state = SI_CLEARING_FLAGS;
435 }
436
437 /*
438  * When we have a situation where we run out of memory and cannot
439  * allocate messages, we just leave them in the BMC and run the system
440  * polled until we can allocate some memory.  Once we have some
441  * memory, we will re-enable the interrupt.
442  */
443 static inline void disable_si_irq(struct smi_info *smi_info)
444 {
445         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
446                 start_disable_irq(smi_info);
447                 smi_info->interrupt_disabled = 1;
448                 if (!atomic_read(&smi_info->stop_operation))
449                         mod_timer(&smi_info->si_timer,
450                                   jiffies + SI_TIMEOUT_JIFFIES);
451         }
452 }
453
454 static inline void enable_si_irq(struct smi_info *smi_info)
455 {
456         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
457                 start_enable_irq(smi_info);
458                 smi_info->interrupt_disabled = 0;
459         }
460 }
461
462 static void handle_flags(struct smi_info *smi_info)
463 {
464  retry:
465         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
466                 /* Watchdog pre-timeout */
467                 smi_inc_stat(smi_info, watchdog_pretimeouts);
468
469                 start_clear_flags(smi_info);
470                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
471                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
472         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
473                 /* Messages available. */
474                 smi_info->curr_msg = ipmi_alloc_smi_msg();
475                 if (!smi_info->curr_msg) {
476                         disable_si_irq(smi_info);
477                         smi_info->si_state = SI_NORMAL;
478                         return;
479                 }
480                 enable_si_irq(smi_info);
481
482                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
483                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
484                 smi_info->curr_msg->data_size = 2;
485
486                 smi_info->handlers->start_transaction(
487                         smi_info->si_sm,
488                         smi_info->curr_msg->data,
489                         smi_info->curr_msg->data_size);
490                 smi_info->si_state = SI_GETTING_MESSAGES;
491         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
492                 /* Events available. */
493                 smi_info->curr_msg = ipmi_alloc_smi_msg();
494                 if (!smi_info->curr_msg) {
495                         disable_si_irq(smi_info);
496                         smi_info->si_state = SI_NORMAL;
497                         return;
498                 }
499                 enable_si_irq(smi_info);
500
501                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
502                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
503                 smi_info->curr_msg->data_size = 2;
504
505                 smi_info->handlers->start_transaction(
506                         smi_info->si_sm,
507                         smi_info->curr_msg->data,
508                         smi_info->curr_msg->data_size);
509                 smi_info->si_state = SI_GETTING_EVENTS;
510         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
511                    smi_info->oem_data_avail_handler) {
512                 if (smi_info->oem_data_avail_handler(smi_info))
513                         goto retry;
514         } else
515                 smi_info->si_state = SI_NORMAL;
516 }
517
518 static void handle_transaction_done(struct smi_info *smi_info)
519 {
520         struct ipmi_smi_msg *msg;
521 #ifdef DEBUG_TIMING
522         struct timeval t;
523
524         do_gettimeofday(&t);
525         printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
526 #endif
527         switch (smi_info->si_state) {
528         case SI_NORMAL:
529                 if (!smi_info->curr_msg)
530                         break;
531
532                 smi_info->curr_msg->rsp_size
533                         = smi_info->handlers->get_result(
534                                 smi_info->si_sm,
535                                 smi_info->curr_msg->rsp,
536                                 IPMI_MAX_MSG_LENGTH);
537
538                 /*
539                  * Do this here because deliver_recv_msg() releases the
540                  * lock, and a new message can be put in during the
541                  * time the lock is released.
542                  */
543                 msg = smi_info->curr_msg;
544                 smi_info->curr_msg = NULL;
545                 deliver_recv_msg(smi_info, msg);
546                 break;
547
548         case SI_GETTING_FLAGS:
549         {
550                 unsigned char msg[4];
551                 unsigned int  len;
552
553                 /* We got the flags from the SMI, now handle them. */
554                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
555                 if (msg[2] != 0) {
556                         /* Error fetching flags, just give up for now. */
557                         smi_info->si_state = SI_NORMAL;
558                 } else if (len < 4) {
559                         /*
560                          * Hmm, no flags.  That's technically illegal, but
561                          * don't use uninitialized data.
562                          */
563                         smi_info->si_state = SI_NORMAL;
564                 } else {
565                         smi_info->msg_flags = msg[3];
566                         handle_flags(smi_info);
567                 }
568                 break;
569         }
570
571         case SI_CLEARING_FLAGS:
572         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
573         {
574                 unsigned char msg[3];
575
576                 /* We cleared the flags. */
577                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
578                 if (msg[2] != 0) {
579                         /* Error clearing flags */
580                         dev_warn(smi_info->dev,
581                                  "Error clearing flags: %2.2x\n", msg[2]);
582                 }
583                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
584                         start_enable_irq(smi_info);
585                 else
586                         smi_info->si_state = SI_NORMAL;
587                 break;
588         }
589
590         case SI_GETTING_EVENTS:
591         {
592                 smi_info->curr_msg->rsp_size
593                         = smi_info->handlers->get_result(
594                                 smi_info->si_sm,
595                                 smi_info->curr_msg->rsp,
596                                 IPMI_MAX_MSG_LENGTH);
597
598                 /*
599                  * Do this here because deliver_recv_msg() releases the
600                  * lock, and a new message can be put in during the
601                  * time the lock is released.
602                  */
603                 msg = smi_info->curr_msg;
604                 smi_info->curr_msg = NULL;
605                 if (msg->rsp[2] != 0) {
606                         /* Error getting event, probably done. */
607                         msg->done(msg);
608
609                         /* Take off the event flag. */
610                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
611                         handle_flags(smi_info);
612                 } else {
613                         smi_inc_stat(smi_info, events);
614
615                         /*
616                          * Do this before we deliver the message
617                          * because delivering the message releases the
618                          * lock and something else can mess with the
619                          * state.
620                          */
621                         handle_flags(smi_info);
622
623                         deliver_recv_msg(smi_info, msg);
624                 }
625                 break;
626         }
627
628         case SI_GETTING_MESSAGES:
629         {
630                 smi_info->curr_msg->rsp_size
631                         = smi_info->handlers->get_result(
632                                 smi_info->si_sm,
633                                 smi_info->curr_msg->rsp,
634                                 IPMI_MAX_MSG_LENGTH);
635
636                 /*
637                  * Do this here because deliver_recv_msg() releases the
638                  * lock, and a new message can be put in during the
639                  * time the lock is released.
640                  */
641                 msg = smi_info->curr_msg;
642                 smi_info->curr_msg = NULL;
643                 if (msg->rsp[2] != 0) {
644                         /* Error getting message, probably done. */
645                         msg->done(msg);
646
647                         /* Take off the msg flag. */
648                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
649                         handle_flags(smi_info);
650                 } else {
651                         smi_inc_stat(smi_info, incoming_messages);
652
653                         /*
654                          * Do this before we deliver the message
655                          * because delivering the message releases the
656                          * lock and something else can mess with the
657                          * state.
658                          */
659                         handle_flags(smi_info);
660
661                         deliver_recv_msg(smi_info, msg);
662                 }
663                 break;
664         }
665
666         case SI_ENABLE_INTERRUPTS1:
667         {
668                 unsigned char msg[4];
669
670                 /* We got the flags from the SMI, now handle them. */
671                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
672                 if (msg[2] != 0) {
673                         dev_warn(smi_info->dev,
674                                  "Couldn't get irq info: %x.\n", msg[2]);
675                         dev_warn(smi_info->dev,
676                                  "Maybe ok, but ipmi might run very slowly.\n");
677                         smi_info->si_state = SI_NORMAL;
678                 } else {
679                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
680                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
681                         msg[2] = (msg[3] |
682                                   IPMI_BMC_RCV_MSG_INTR |
683                                   IPMI_BMC_EVT_MSG_INTR);
684                         smi_info->handlers->start_transaction(
685                                 smi_info->si_sm, msg, 3);
686                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
687                 }
688                 break;
689         }
690
691         case SI_ENABLE_INTERRUPTS2:
692         {
693                 unsigned char msg[4];
694
695                 /* We got the flags from the SMI, now handle them. */
696                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
697                 if (msg[2] != 0) {
698                         dev_warn(smi_info->dev,
699                                  "Couldn't set irq info: %x.\n", msg[2]);
700                         dev_warn(smi_info->dev,
701                                  "Maybe ok, but ipmi might run very slowly.\n");
702                 } else
703                         smi_info->interrupt_disabled = 0;
704                 smi_info->si_state = SI_NORMAL;
705                 break;
706         }
707
708         case SI_DISABLE_INTERRUPTS1:
709         {
710                 unsigned char msg[4];
711
712                 /* We got the flags from the SMI, now handle them. */
713                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
714                 if (msg[2] != 0) {
715                         dev_warn(smi_info->dev, "Could not disable interrupts"
716                                  ", failed get.\n");
717                         smi_info->si_state = SI_NORMAL;
718                 } else {
719                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
720                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
721                         msg[2] = (msg[3] &
722                                   ~(IPMI_BMC_RCV_MSG_INTR |
723                                     IPMI_BMC_EVT_MSG_INTR));
724                         smi_info->handlers->start_transaction(
725                                 smi_info->si_sm, msg, 3);
726                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
727                 }
728                 break;
729         }
730
731         case SI_DISABLE_INTERRUPTS2:
732         {
733                 unsigned char msg[4];
734
735                 /* We got the flags from the SMI, now handle them. */
736                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
737                 if (msg[2] != 0) {
738                         dev_warn(smi_info->dev, "Could not disable interrupts"
739                                  ", failed set.\n");
740                 }
741                 smi_info->si_state = SI_NORMAL;
742                 break;
743         }
744         }
745 }
746
747 /*
748  * Called on timeouts and events.  Timeouts should pass the elapsed
749  * time, interrupts should pass in zero.  Must be called with
750  * si_lock held and interrupts disabled.
751  */
752 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
753                                            int time)
754 {
755         enum si_sm_result si_sm_result;
756
757  restart:
758         /*
759          * There used to be a loop here that waited a little while
760          * (around 25us) before giving up.  That turned out to be
761          * pointless, the minimum delays I was seeing were in the 300us
762          * range, which is far too long to wait in an interrupt.  So
763          * we just run until the state machine tells us something
764          * happened or it needs a delay.
765          */
766         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
767         time = 0;
768         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
769                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
770
771         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
772                 smi_inc_stat(smi_info, complete_transactions);
773
774                 handle_transaction_done(smi_info);
775                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
776         } else if (si_sm_result == SI_SM_HOSED) {
777                 smi_inc_stat(smi_info, hosed_count);
778
779                 /*
780                  * Do this before return_hosed_msg(), because that
781                  * releases the lock.
782                  */
783                 smi_info->si_state = SI_NORMAL;
784                 if (smi_info->curr_msg != NULL) {
785                         /*
786                          * If we were handling a user message, format
787                          * a response to send to the upper layer to
788                          * tell it about the error.
789                          */
790                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
791                 }
792                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
793         }
794
795         /*
796          * We prefer handling attn over new messages.  But don't do
797          * this if there is not yet an upper layer to handle anything.
798          */
799         if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
800                 unsigned char msg[2];
801
802                 smi_inc_stat(smi_info, attentions);
803
804                 /*
805                  * Got an attn, send down a get message flags to see
806                  * what's causing it.  It would be better to handle
807                  * this in the upper layer, but due to the way
808                  * interrupts work with the SMI, that's not really
809                  * possible.
810                  */
811                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
812                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
813
814                 smi_info->handlers->start_transaction(
815                         smi_info->si_sm, msg, 2);
816                 smi_info->si_state = SI_GETTING_FLAGS;
817                 goto restart;
818         }
819
820         /* If we are currently idle, try to start the next message. */
821         if (si_sm_result == SI_SM_IDLE) {
822                 smi_inc_stat(smi_info, idles);
823
824                 si_sm_result = start_next_msg(smi_info);
825                 if (si_sm_result != SI_SM_IDLE)
826                         goto restart;
827         }
828
829         if ((si_sm_result == SI_SM_IDLE)
830             && (atomic_read(&smi_info->req_events))) {
831                 /*
832                  * We are idle and the upper layer requested that I fetch
833                  * events, so do so.
834                  */
835                 atomic_set(&smi_info->req_events, 0);
836
837                 smi_info->curr_msg = ipmi_alloc_smi_msg();
838                 if (!smi_info->curr_msg)
839                         goto out;
840
841                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
842                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
843                 smi_info->curr_msg->data_size = 2;
844
845                 smi_info->handlers->start_transaction(
846                         smi_info->si_sm,
847                         smi_info->curr_msg->data,
848                         smi_info->curr_msg->data_size);
849                 smi_info->si_state = SI_GETTING_EVENTS;
850                 goto restart;
851         }
852  out:
853         return si_sm_result;
854 }
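/*
 * Call-pattern note restating the contract above with this file's own
 * callers: smi_timeout() passes the elapsed time as
 * (jiffies delta) * SI_USEC_PER_JIFFY, si_irq_handler() passes 0, and
 * poll() passes the 10 us it just spent in udelay(), all with si_lock
 * held (except when running to completion).
 */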
855
856 static void sender(void                *send_info,
857                    struct ipmi_smi_msg *msg,
858                    int                 priority)
859 {
860         struct smi_info   *smi_info = send_info;
861         enum si_sm_result result;
862         unsigned long     flags;
863 #ifdef DEBUG_TIMING
864         struct timeval    t;
865 #endif
866
867         if (atomic_read(&smi_info->stop_operation)) {
868                 msg->rsp[0] = msg->data[0] | 4;
869                 msg->rsp[1] = msg->data[1];
870                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
871                 msg->rsp_size = 3;
872                 deliver_recv_msg(smi_info, msg);
873                 return;
874         }
875
876 #ifdef DEBUG_TIMING
877         do_gettimeofday(&t);
878         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
879 #endif
880
881         if (smi_info->run_to_completion) {
882                 /*
883                  * If we are running to completion, then throw it in
884                  * the list and run transactions until everything is
885                  * clear.  Priority doesn't matter here.
886                  */
887
888                 /*
889                  * Run to completion means we are single-threaded, no
890                  * need for locks.
891                  */
892                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
893
894                 result = smi_event_handler(smi_info, 0);
895                 while (result != SI_SM_IDLE) {
896                         udelay(SI_SHORT_TIMEOUT_USEC);
897                         result = smi_event_handler(smi_info,
898                                                    SI_SHORT_TIMEOUT_USEC);
899                 }
900                 return;
901         }
902
903         spin_lock_irqsave(&smi_info->si_lock, flags);
904         if (priority > 0)
905                 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
906         else
907                 list_add_tail(&msg->link, &smi_info->xmit_msgs);
908
909         if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
910                 /*
911                  * last_timeout_jiffies is updated here to avoid
912                  * smi_timeout() handler passing very large time_diff
913                  * value to smi_event_handler() that causes
914                  * the send command to abort.
915                  */
916                 smi_info->last_timeout_jiffies = jiffies;
917
918                 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
919
920                 if (smi_info->thread)
921                         wake_up_process(smi_info->thread);
922
923                 start_next_msg(smi_info);
924                 smi_event_handler(smi_info, 0);
925         }
926         spin_unlock_irqrestore(&smi_info->si_lock, flags);
927 }
928
929 static void set_run_to_completion(void *send_info, int i_run_to_completion)
930 {
931         struct smi_info   *smi_info = send_info;
932         enum si_sm_result result;
933
934         smi_info->run_to_completion = i_run_to_completion;
935         if (i_run_to_completion) {
936                 result = smi_event_handler(smi_info, 0);
937                 while (result != SI_SM_IDLE) {
938                         udelay(SI_SHORT_TIMEOUT_USEC);
939                         result = smi_event_handler(smi_info,
940                                                    SI_SHORT_TIMEOUT_USEC);
941                 }
942         }
943 }
944
945 /*
946  * Use -1 in the nsec value of the busy waiting timespec to tell that
947  * we are spinning in kipmid looking for something and not delaying
948  * between checks
949  */
950 static inline void ipmi_si_set_not_busy(struct timespec *ts)
951 {
952         ts->tv_nsec = -1;
953 }
954 static inline int ipmi_si_is_busy(struct timespec *ts)
955 {
956         return ts->tv_nsec != -1;
957 }
958
959 static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
960                                  const struct smi_info *smi_info,
961                                  struct timespec *busy_until)
962 {
963         unsigned int max_busy_us = 0;
964
965         if (smi_info->intf_num < num_max_busy_us)
966                 max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
967         if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
968                 ipmi_si_set_not_busy(busy_until);
969         else if (!ipmi_si_is_busy(busy_until)) {
970                 getnstimeofday(busy_until);
971                 timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
972         } else {
973                 struct timespec now;
974                 getnstimeofday(&now);
975                 if (unlikely(timespec_compare(&now, busy_until) > 0)) {
976                         ipmi_si_set_not_busy(busy_until);
977                         return 0;
978                 }
979         }
980         return 1;
981 }
982
983
984 /*
985  * A busy-waiting loop for speeding up IPMI operation.
986  *
987  * Lousy hardware makes this hard.  This is only enabled for systems
988  * that are not BT and do not have interrupts.  It keeps spinning
989  * until an operation completes or until max_busy tells it to stop
990  * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
991  * Documentation/IPMI.txt for details.
992  */
993 static int ipmi_thread(void *data)
994 {
995         struct smi_info *smi_info = data;
996         unsigned long flags;
997         enum si_sm_result smi_result;
998         struct timespec busy_until;
999
1000         ipmi_si_set_not_busy(&busy_until);
1001         set_user_nice(current, 19);
1002         while (!kthread_should_stop()) {
1003                 int busy_wait;
1004
1005                 spin_lock_irqsave(&(smi_info->si_lock), flags);
1006                 smi_result = smi_event_handler(smi_info, 0);
1007                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1008                 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1009                                                   &busy_until);
1010                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1011                         ; /* do nothing */
1012                 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1013                         schedule();
1014                 else if (smi_result == SI_SM_IDLE)
1015                         schedule_timeout_interruptible(100);
1016                 else
1017                         schedule_timeout_interruptible(1);
1018         }
1019         return 0;
1020 }
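/*
 * Tuning example (values are illustrative): loading the driver with
 * kipmid_max_busy_us=100,200 caps the busy-wait above at 100 us on
 * interface 0 and 200 us on interface 1; once the budget is spent,
 * ipmi_thread_busy_wait() returns 0 and the loop sleeps via
 * schedule_timeout_interruptible(1) instead of spinning.
 */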
1021
1022
1023 static void poll(void *send_info)
1024 {
1025         struct smi_info *smi_info = send_info;
1026         unsigned long flags = 0;
1027         int run_to_completion = smi_info->run_to_completion;
1028
1029         /*
1030          * Make sure there is some delay in the poll loop so we can
1031          * drive time forward and timeout things.
1032          */
1033         udelay(10);
1034         if (!run_to_completion)
1035                 spin_lock_irqsave(&smi_info->si_lock, flags);
1036         smi_event_handler(smi_info, 10);
1037         if (!run_to_completion)
1038                 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1039 }
1040
1041 static void request_events(void *send_info)
1042 {
1043         struct smi_info *smi_info = send_info;
1044
1045         if (atomic_read(&smi_info->stop_operation) ||
1046                                 !smi_info->has_event_buffer)
1047                 return;
1048
1049         atomic_set(&smi_info->req_events, 1);
1050 }
1051
1052 static int initialized;
1053
1054 static void smi_timeout(unsigned long data)
1055 {
1056         struct smi_info   *smi_info = (struct smi_info *) data;
1057         enum si_sm_result smi_result;
1058         unsigned long     flags;
1059         unsigned long     jiffies_now;
1060         long              time_diff;
1061         long              timeout;
1062 #ifdef DEBUG_TIMING
1063         struct timeval    t;
1064 #endif
1065
1066         spin_lock_irqsave(&(smi_info->si_lock), flags);
1067 #ifdef DEBUG_TIMING
1068         do_gettimeofday(&t);
1069         printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1070 #endif
1071         jiffies_now = jiffies;
1072         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1073                      * SI_USEC_PER_JIFFY);
1074         smi_result = smi_event_handler(smi_info, time_diff);
1075
1076         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1077
1078         smi_info->last_timeout_jiffies = jiffies_now;
1079
1080         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1081                 /* Running with interrupts, only do long timeouts. */
1082                 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1083                 smi_inc_stat(smi_info, long_timeouts);
1084                 goto do_mod_timer;
1085         }
1086
1087         /*
1088          * If the state machine asks for a short delay, then shorten
1089          * the timer timeout.
1090          */
1091         if (smi_result == SI_SM_CALL_WITH_DELAY) {
1092                 smi_inc_stat(smi_info, short_timeouts);
1093                 timeout = jiffies + 1;
1094         } else {
1095                 smi_inc_stat(smi_info, long_timeouts);
1096                 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1097         }
1098
1099  do_mod_timer:
1100         if (smi_result != SI_SM_IDLE)
1101                 mod_timer(&(smi_info->si_timer), timeout);
1102 }
1103
1104 static irqreturn_t si_irq_handler(int irq, void *data)
1105 {
1106         struct smi_info *smi_info = data;
1107         unsigned long   flags;
1108 #ifdef DEBUG_TIMING
1109         struct timeval  t;
1110 #endif
1111
1112         spin_lock_irqsave(&(smi_info->si_lock), flags);
1113
1114         smi_inc_stat(smi_info, interrupts);
1115
1116 #ifdef DEBUG_TIMING
1117         do_gettimeofday(&t);
1118         printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1119 #endif
1120         smi_event_handler(smi_info, 0);
1121         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1122         return IRQ_HANDLED;
1123 }
1124
1125 static irqreturn_t si_bt_irq_handler(int irq, void *data)
1126 {
1127         struct smi_info *smi_info = data;
1128         /* We need to clear the IRQ flag for the BT interface. */
1129         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1130                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1131                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1132         return si_irq_handler(irq, data);
1133 }
1134
1135 static int smi_start_processing(void       *send_info,
1136                                 ipmi_smi_t intf)
1137 {
1138         struct smi_info *new_smi = send_info;
1139         int             enable = 0;
1140
1141         new_smi->intf = intf;
1142
1143         /* Try to claim any interrupts. */
1144         if (new_smi->irq_setup)
1145                 new_smi->irq_setup(new_smi);
1146
1147         /* Set up the timer that drives the interface. */
1148         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1149         new_smi->last_timeout_jiffies = jiffies;
1150         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1151
1152         /*
1153          * Check if the user forcefully enabled the daemon.
1154          */
1155         if (new_smi->intf_num < num_force_kipmid)
1156                 enable = force_kipmid[new_smi->intf_num];
1157         /*
1158          * The BT interface is efficient enough to not need a thread,
1159          * and there is no need for a thread if we have interrupts.
1160          */
1161         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1162                 enable = 1;
1163
1164         if (enable) {
1165                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1166                                               "kipmi%d", new_smi->intf_num);
1167                 if (IS_ERR(new_smi->thread)) {
1168                         dev_notice(new_smi->dev, "Could not start"
1169                                    " kernel thread due to error %ld, only using"
1170                                    " timers to drive the interface\n",
1171                                    PTR_ERR(new_smi->thread));
1172                         new_smi->thread = NULL;
1173                 }
1174         }
1175
1176         return 0;
1177 }
1178
1179 static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1180 {
1181         struct smi_info *smi = send_info;
1182
1183         data->addr_src = smi->addr_source;
1184         data->dev = smi->dev;
1185         data->addr_info = smi->addr_info;
1186         get_device(smi->dev);
1187
1188         return 0;
1189 }
1190
1191 static void set_maintenance_mode(void *send_info, int enable)
1192 {
1193         struct smi_info   *smi_info = send_info;
1194
1195         if (!enable)
1196                 atomic_set(&smi_info->req_events, 0);
1197 }
1198
1199 static struct ipmi_smi_handlers handlers = {
1200         .owner                  = THIS_MODULE,
1201         .start_processing       = smi_start_processing,
1202         .get_smi_info           = get_smi_info,
1203         .sender                 = sender,
1204         .request_events         = request_events,
1205         .set_maintenance_mode   = set_maintenance_mode,
1206         .set_run_to_completion  = set_run_to_completion,
1207         .poll                   = poll,
1208 };
1209
1210 /*
1211  * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1212  * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
1213  */
1214
1215 static LIST_HEAD(smi_infos);
1216 static DEFINE_MUTEX(smi_infos_lock);
1217 static int smi_num; /* Used to sequence the SMIs */
1218
1219 #define DEFAULT_REGSPACING      1
1220 #define DEFAULT_REGSIZE         1
1221
1222 #ifdef CONFIG_ACPI
1223 static bool          si_tryacpi = 1;
1224 #endif
1225 #ifdef CONFIG_DMI
1226 static bool          si_trydmi = 1;
1227 #endif
1228 static bool          si_tryplatform = 1;
1229 #ifdef CONFIG_PCI
1230 static bool          si_trypci = 1;
1231 #endif
1232 static bool          si_trydefaults = 1;
1233 static char          *si_type[SI_MAX_PARMS];
1234 #define MAX_SI_TYPE_STR 30
1235 static char          si_type_str[MAX_SI_TYPE_STR];
1236 static unsigned long addrs[SI_MAX_PARMS];
1237 static unsigned int num_addrs;
1238 static unsigned int  ports[SI_MAX_PARMS];
1239 static unsigned int num_ports;
1240 static int           irqs[SI_MAX_PARMS];
1241 static unsigned int num_irqs;
1242 static int           regspacings[SI_MAX_PARMS];
1243 static unsigned int num_regspacings;
1244 static int           regsizes[SI_MAX_PARMS];
1245 static unsigned int num_regsizes;
1246 static int           regshifts[SI_MAX_PARMS];
1247 static unsigned int num_regshifts;
1248 static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
1249 static unsigned int num_slave_addrs;
1250
1251 #define IPMI_IO_ADDR_SPACE  0
1252 #define IPMI_MEM_ADDR_SPACE 1
1253 static char *addr_space_to_str[] = { "i/o", "mem" };
1254
1255 static int hotmod_handler(const char *val, struct kernel_param *kp);
1256
1257 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1258 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1259                  " Documentation/IPMI.txt in the kernel sources for the"
1260                  " gory details.");
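/*
 * Hotmod usage sketch (the address is illustrative; see
 * Documentation/IPMI.txt for the full option grammar):
 *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 * registers a KCS interface at I/O port 0xca2 at runtime, and a
 * corresponding "remove,..." line tears it down again.
 */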
1261
1262 #ifdef CONFIG_ACPI
1263 module_param_named(tryacpi, si_tryacpi, bool, 0);
1264 MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
1265                  " default scan of the interfaces identified via ACPI");
1266 #endif
1267 #ifdef CONFIG_DMI
1268 module_param_named(trydmi, si_trydmi, bool, 0);
1269 MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
1270                  " default scan of the interfaces identified via DMI");
1271 #endif
1272 module_param_named(tryplatform, si_tryplatform, bool, 0);
1273 MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
1274                  " default scan of the interfaces identified via platform"
1275                  " interfaces like openfirmware");
1276 #ifdef CONFIG_PCI
1277 module_param_named(trypci, si_trypci, bool, 0);
1278 MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
1279                  " default scan of the interfaces identified via pci");
1280 #endif
1281 module_param_named(trydefaults, si_trydefaults, bool, 0);
1282 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1283                  " default scan of the KCS and SMIC interface at the standard"
1284                  " address");
1285 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1286 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1287                  " interface separated by commas.  The types are 'kcs',"
1288                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1289                  " the first interface to kcs and the second to bt");
1290 module_param_array(addrs, ulong, &num_addrs, 0);
1291 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1292                  " addresses separated by commas.  Only use if an interface"
1293                  " is in memory.  Otherwise, set it to zero or leave"
1294                  " it blank.");
1295 module_param_array(ports, uint, &num_ports, 0);
1296 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1297                  " addresses separated by commas.  Only use if an interface"
1298                  " is a port.  Otherwise, set it to zero or leave"
1299                  " it blank.");
1300 module_param_array(irqs, int, &num_irqs, 0);
1301 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1302                  " addresses separated by commas.  Only use if an interface"
1303                  " has an interrupt.  Otherwise, set it to zero or leave"
1304                  " it blank.");
1305 module_param_array(regspacings, int, &num_regspacings, 0);
1306 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1307                  " and each successive register used by the interface.  For"
1308                  " instance, if the start address is 0xca2 and the spacing"
1309                  " is 2, then the second address is at 0xca4.  Defaults"
1310                  " to 1.");
1311 module_param_array(regsizes, int, &num_regsizes, 0);
1312 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1313                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1314                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1315                  " the 8-bit IPMI register has to be read from a larger"
1316                  " register.");
1317 module_param_array(regshifts, int, &num_regshifts, 0);
1318 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1319                  " IPMI register, in bits.  For instance, if the data"
1320                  " is read from a 32-bit word and the IPMI data is in"
1321                  " bits 8-15, then the shift would be 8.");
1322 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1323 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1324                  " the controller.  Normally this is 0x20, but can be"
1325                  " overridden by this parm.  This is an array indexed"
1326                  " by interface number.");
1327 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1328 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1329                  " disabled(0).  Normally the IPMI driver auto-detects"
1330                  " this, but the value may be overridden by this parm.");
1331 module_param(unload_when_empty, int, 0);
1332 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1333                  " specified or found, default is 1.  Setting to 0"
1334                  " is useful for hot add of devices using hotmod.");
1335 module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
1336 MODULE_PARM_DESC(kipmid_max_busy_us,
1337                  "Max time (in microseconds) to busy-wait for IPMI data before"
1338                  " sleeping. 0 (default) means to wait forever. Set to 100-500"
1339                  " if kipmid is using up a lot of CPU time.");
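/*
 * Example invocation combining the parameters above (all values are
 * illustrative, not defaults):
 *   modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 regspacings=1,4 irqs=0,10
 * describes two hardcoded interfaces: a KCS interface at I/O port 0xca2
 * polled without an interrupt, and a BT interface at 0xe4 with a
 * register spacing of 4 driven by IRQ 10.
 */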
1340
1341
1342 static void std_irq_cleanup(struct smi_info *info)
1343 {
1344         if (info->si_type == SI_BT)
1345                 /* Disable the interrupt in the BT interface. */
1346                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1347         free_irq(info->irq, info);
1348 }
1349
1350 static int std_irq_setup(struct smi_info *info)
1351 {
1352         int rv;
1353
1354         if (!info->irq)
1355                 return 0;
1356
1357         if (info->si_type == SI_BT) {
1358                 rv = request_irq(info->irq,
1359                                  si_bt_irq_handler,
1360                                  IRQF_SHARED,
1361                                  DEVICE_NAME,
1362                                  info);
1363                 if (!rv)
1364                         /* Enable the interrupt in the BT interface. */
1365                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1366                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1367         } else
1368                 rv = request_irq(info->irq,
1369                                  si_irq_handler,
1370                                  IRQF_SHARED,
1371                                  DEVICE_NAME,
1372                                  info);
1373         if (rv) {
1374                 dev_warn(info->dev, "%s unable to claim interrupt %d,"
1375                          " running polled\n",
1376                          DEVICE_NAME, info->irq);
1377                 info->irq = 0;
1378         } else {
1379                 info->irq_cleanup = std_irq_cleanup;
1380                 dev_info(info->dev, "Using irq %d\n", info->irq);
1381         }
1382
1383         return rv;
1384 }
1385
1386 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1387 {
1388         unsigned int addr = io->addr_data;
1389
1390         return inb(addr + (offset * io->regspacing));
1391 }
1392
1393 static void port_outb(struct si_sm_io *io, unsigned int offset,
1394                       unsigned char b)
1395 {
1396         unsigned int addr = io->addr_data;
1397
1398         outb(b, addr + (offset * io->regspacing));
1399 }
1400
1401 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1402 {
1403         unsigned int addr = io->addr_data;
1404
1405         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1406 }
1407
1408 static void port_outw(struct si_sm_io *io, unsigned int offset,
1409                       unsigned char b)
1410 {
1411         unsigned int addr = io->addr_data;
1412
1413         outw(b << io->regshift, addr + (offset * io->regspacing));
1414 }
1415
1416 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1417 {
1418         unsigned int addr = io->addr_data;
1419
1420         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1421 }
1422
1423 static void port_outl(struct si_sm_io *io, unsigned int offset,
1424                       unsigned char b)
1425 {
1426         unsigned int addr = io->addr_data;
1427
1428         outl(b << io->regshift, addr+(offset * io->regspacing));
1429 }
1430
1431 static void port_cleanup(struct smi_info *info)
1432 {
1433         unsigned int addr = info->io.addr_data;
1434         int          idx;
1435
1436         if (addr) {
1437                 for (idx = 0; idx < info->io_size; idx++)
1438                         release_region(addr + idx * info->io.regspacing,
1439                                        info->io.regsize);
1440         }
1441 }
1442
1443 static int port_setup(struct smi_info *info)
1444 {
1445         unsigned int addr = info->io.addr_data;
1446         int          idx;
1447
1448         if (!addr)
1449                 return -ENODEV;
1450
1451         info->io_cleanup = port_cleanup;
1452
1453         /*
1454          * Figure out the actual inb/inw/inl/etc routine to use based
1455          * upon the register size.
1456          */
1457         switch (info->io.regsize) {
1458         case 1:
1459                 info->io.inputb = port_inb;
1460                 info->io.outputb = port_outb;
1461                 break;
1462         case 2:
1463                 info->io.inputb = port_inw;
1464                 info->io.outputb = port_outw;
1465                 break;
1466         case 4:
1467                 info->io.inputb = port_inl;
1468                 info->io.outputb = port_outl;
1469                 break;
1470         default:
1471                 dev_warn(info->dev, "Invalid register size: %d\n",
1472                          info->io.regsize);
1473                 return -EINVAL;
1474         }
1475
1476         /*
1477          * Some BIOSes reserve disjoint I/O regions in their ACPI
1478          * tables.  This causes problems when trying to register the
1479          * entire I/O region.  Therefore we must register each I/O
1480          * port separately.
1481          */
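        /*
         * Illustrative example (hypothetical values): with addr_data
         * 0xca2, regspacing 4, regsize 1 and io_size 2, the loop below
         * requests two one-byte regions, at 0xca2 and 0xca6, rather
         * than a single region spanning 0xca2-0xca6.
         */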
1482         for (idx = 0; idx < info->io_size; idx++) {
1483                 if (request_region(addr + idx * info->io.regspacing,
1484                                    info->io.regsize, DEVICE_NAME) == NULL) {
1485                         /* Undo allocations */
1486                         while (idx--) {
1487                                 release_region(addr + idx * info->io.regspacing,
1488                                                info->io.regsize);
1489                         }
1490                         return -EIO;
1491                 }
1492         }
1493         return 0;
1494 }
1495
1496 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1497 {
1498         return readb((io->addr)+(offset * io->regspacing));
1499 }
1500
1501 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1502                      unsigned char b)
1503 {
1504         writeb(b, (io->addr)+(offset * io->regspacing));
1505 }
1506
1507 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1508 {
1509         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1510                 & 0xff;
1511 }
1512
1513 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1514                      unsigned char b)
1515 {
1516         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1517 }
1518
1519 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1520 {
1521         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1522                 & 0xff;
1523 }
1524
1525 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1526                      unsigned char b)
1527 {
1528         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1529 }
1530
1531 #ifdef readq
1532 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1533 {
1534         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1535                 & 0xff;
1536 }
1537
1538 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1539                      unsigned char b)
1540 {
1541         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1542 }
1543 #endif
1544
1545 static void mem_cleanup(struct smi_info *info)
1546 {
1547         unsigned long addr = info->io.addr_data;
1548         int           mapsize;
1549
1550         if (info->io.addr) {
1551                 iounmap(info->io.addr);
1552
1553                 mapsize = ((info->io_size * info->io.regspacing)
1554                            - (info->io.regspacing - info->io.regsize));
1555
1556                 release_mem_region(addr, mapsize);
1557         }
1558 }
1559
1560 static int mem_setup(struct smi_info *info)
1561 {
1562         unsigned long addr = info->io.addr_data;
1563         int           mapsize;
1564
1565         if (!addr)
1566                 return -ENODEV;
1567
1568         info->io_cleanup = mem_cleanup;
1569
1570         /*
1571          * Figure out the actual readb/readw/readl/etc routine to use based
1572          * upon the register size.
1573          */
1574         switch (info->io.regsize) {
1575         case 1:
1576                 info->io.inputb = intf_mem_inb;
1577                 info->io.outputb = intf_mem_outb;
1578                 break;
1579         case 2:
1580                 info->io.inputb = intf_mem_inw;
1581                 info->io.outputb = intf_mem_outw;
1582                 break;
1583         case 4:
1584                 info->io.inputb = intf_mem_inl;
1585                 info->io.outputb = intf_mem_outl;
1586                 break;
1587 #ifdef readq
1588         case 8:
1589                 info->io.inputb = mem_inq;
1590                 info->io.outputb = mem_outq;
1591                 break;
1592 #endif
1593         default:
1594                 dev_warn(info->dev, "Invalid register size: %d\n",
1595                          info->io.regsize);
1596                 return -EINVAL;
1597         }
1598
1599         /*
1600          * Calculate the total amount of memory to claim.  This is an
1601          * unusual looking calculation, but it avoids claiming any
1602          * more memory than it has to.  It will claim everything
1603          * between the first address to the end of the last full
1604          * register.
1605          */
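        /*
         * A worked example (hypothetical values): with io_size 2,
         * regspacing 4 and regsize 1, mapsize = (2 * 4) - (4 - 1) = 5,
         * i.e. from the start of the first register to the end of the
         * last one, without the unused padding after it.
         */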
1606         mapsize = ((info->io_size * info->io.regspacing)
1607                    - (info->io.regspacing - info->io.regsize));
1608
1609         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1610                 return -EIO;
1611
1612         info->io.addr = ioremap(addr, mapsize);
1613         if (info->io.addr == NULL) {
1614                 release_mem_region(addr, mapsize);
1615                 return -EIO;
1616         }
1617         return 0;
1618 }
1619
1620 /*
1621  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1622  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1623  * Options are:
1624  *   rsp=<regspacing>
1625  *   rsi=<regsize>
1626  *   rsh=<regshift>
1627  *   irq=<irq>
1628  *   ipmb=<ipmb addr>
1629  */
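/*
 * For example (values are illustrative), a KCS interface at I/O port
 * 0xca2 with a register spacing of 4 and IRQ 20 can be hot-added with:
 *   echo "add,kcs,i/o,0xca2,rsp=4,irq=20" > /sys/module/ipmi_si/parameters/hotmod
 * and removed again with:
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */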
1630 enum hotmod_op { HM_ADD, HM_REMOVE };
1631 struct hotmod_vals {
1632         char *name;
1633         int  val;
1634 };
1635 static struct hotmod_vals hotmod_ops[] = {
1636         { "add",        HM_ADD },
1637         { "remove",     HM_REMOVE },
1638         { NULL }
1639 };
1640 static struct hotmod_vals hotmod_si[] = {
1641         { "kcs",        SI_KCS },
1642         { "smic",       SI_SMIC },
1643         { "bt",         SI_BT },
1644         { NULL }
1645 };
1646 static struct hotmod_vals hotmod_as[] = {
1647         { "mem",        IPMI_MEM_ADDR_SPACE },
1648         { "i/o",        IPMI_IO_ADDR_SPACE },
1649         { NULL }
1650 };
1651
1652 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1653 {
1654         char *s;
1655         int  i;
1656
1657         s = strchr(*curr, ',');
1658         if (!s) {
1659                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1660                 return -EINVAL;
1661         }
1662         *s = '\0';
1663         s++;
1664         for (i = 0; v[i].name; i++) {
1665                 if (strcmp(*curr, v[i].name) == 0) {
1666                         *val = v[i].val;
1667                         *curr = s;
1668                         return 0;
1669                 }
1670         }
1671
1672         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1673         return -EINVAL;
1674 }
1675
1676 static int check_hotmod_int_op(const char *curr, const char *option,
1677                                const char *name, int *val)
1678 {
1679         char *n;
1680
1681         if (strcmp(curr, name) == 0) {
1682                 if (!option) {
1683                         printk(KERN_WARNING PFX
1684                                "No option given for '%s'\n",
1685                                curr);
1686                         return -EINVAL;
1687                 }
1688                 *val = simple_strtoul(option, &n, 0);
1689                 if ((*n != '\0') || (*option == '\0')) {
1690                         printk(KERN_WARNING PFX
1691                                "Bad option given for '%s'\n",
1692                                curr);
1693                         return -EINVAL;
1694                 }
1695                 return 1;
1696         }
1697         return 0;
1698 }
1699
1700 static struct smi_info *smi_info_alloc(void)
1701 {
1702         struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1703
1704         if (info)
1705                 spin_lock_init(&info->si_lock);
1706         return info;
1707 }
1708
1709 static int hotmod_handler(const char *val, struct kernel_param *kp)
1710 {
1711         char *str = kstrdup(val, GFP_KERNEL);
1712         int  rv;
1713         char *next, *curr, *s, *n, *o;
1714         enum hotmod_op op;
1715         enum si_type si_type;
1716         int  addr_space;
1717         unsigned long addr;
1718         int regspacing;
1719         int regsize;
1720         int regshift;
1721         int irq;
1722         int ipmb;
1723         int ival;
1724         int len;
1725         struct smi_info *info;
1726
1727         if (!str)
1728                 return -ENOMEM;
1729
1730         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1731         len = strlen(str);
1732         ival = len - 1;
1733         while ((ival >= 0) && isspace(str[ival])) {
1734                 str[ival] = '\0';
1735                 ival--;
1736         }
1737
1738         for (curr = str; curr; curr = next) {
1739                 regspacing = 1;
1740                 regsize = 1;
1741                 regshift = 0;
1742                 irq = 0;
1743                 ipmb = 0; /* Choose the default if not specified */
1744
1745                 next = strchr(curr, ':');
1746                 if (next) {
1747                         *next = '\0';
1748                         next++;
1749                 }
1750
1751                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1752                 if (rv)
1753                         break;
1754                 op = ival;
1755
1756                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1757                 if (rv)
1758                         break;
1759                 si_type = ival;
1760
1761                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1762                 if (rv)
1763                         break;
1764
1765                 s = strchr(curr, ',');
1766                 if (s) {
1767                         *s = '\0';
1768                         s++;
1769                 }
1770                 addr = simple_strtoul(curr, &n, 0);
1771                 if ((*n != '\0') || (*curr == '\0')) {
1772                         printk(KERN_WARNING PFX "Invalid hotmod address"
1773                                " '%s'\n", curr);
1774                         break;
1775                 }
1776
1777                 while (s) {
1778                         curr = s;
1779                         s = strchr(curr, ',');
1780                         if (s) {
1781                                 *s = '\0';
1782                                 s++;
1783                         }
1784                         o = strchr(curr, '=');
1785                         if (o) {
1786                                 *o = '\0';
1787                                 o++;
1788                         }
1789                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1790                         if (rv < 0)
1791                                 goto out;
1792                         else if (rv)
1793                                 continue;
1794                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1795                         if (rv < 0)
1796                                 goto out;
1797                         else if (rv)
1798                                 continue;
1799                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1800                         if (rv < 0)
1801                                 goto out;
1802                         else if (rv)
1803                                 continue;
1804                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1805                         if (rv < 0)
1806                                 goto out;
1807                         else if (rv)
1808                                 continue;
1809                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1810                         if (rv < 0)
1811                                 goto out;
1812                         else if (rv)
1813                                 continue;
1814
1815                         rv = -EINVAL;
1816                         printk(KERN_WARNING PFX
1817                                "Invalid hotmod option '%s'\n",
1818                                curr);
1819                         goto out;
1820                 }
1821
1822                 if (op == HM_ADD) {
1823                         info = smi_info_alloc();
1824                         if (!info) {
1825                                 rv = -ENOMEM;
1826                                 goto out;
1827                         }
1828
1829                         info->addr_source = SI_HOTMOD;
1830                         info->si_type = si_type;
1831                         info->io.addr_data = addr;
1832                         info->io.addr_type = addr_space;
1833                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1834                                 info->io_setup = mem_setup;
1835                         else
1836                                 info->io_setup = port_setup;
1837
1838                         info->io.addr = NULL;
1839                         info->io.regspacing = regspacing;
1840                         if (!info->io.regspacing)
1841                                 info->io.regspacing = DEFAULT_REGSPACING;
1842                         info->io.regsize = regsize;
1843                         if (!info->io.regsize)
1844                                 info->io.regsize = DEFAULT_REGSPACING;
1845                         info->io.regshift = regshift;
1846                         info->irq = irq;
1847                         if (info->irq)
1848                                 info->irq_setup = std_irq_setup;
1849                         info->slave_addr = ipmb;
1850
1851                         rv = add_smi(info);
1852                         if (rv) {
1853                                 kfree(info);
1854                                 goto out;
1855                         }
1856                         rv = try_smi_init(info);
1857                         if (rv) {
1858                                 cleanup_one_si(info);
1859                                 goto out;
1860                         }
1861                 } else {
1862                         /* remove */
1863                         struct smi_info *e, *tmp_e;
1864
1865                         mutex_lock(&smi_infos_lock);
1866                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1867                                 if (e->io.addr_type != addr_space)
1868                                         continue;
1869                                 if (e->si_type != si_type)
1870                                         continue;
1871                                 if (e->io.addr_data == addr)
1872                                         cleanup_one_si(e);
1873                         }
1874                         mutex_unlock(&smi_infos_lock);
1875                 }
1876         }
1877         rv = len;
1878  out:
1879         kfree(str);
1880         return rv;
1881 }
1882
1883 static int hardcode_find_bmc(void)
1884 {
1885         int ret = -ENODEV;
1886         int             i;
1887         struct smi_info *info;
1888
1889         for (i = 0; i < SI_MAX_PARMS; i++) {
1890                 if (!ports[i] && !addrs[i])
1891                         continue;
1892
1893                 info = smi_info_alloc();
1894                 if (!info)
1895                         return -ENOMEM;
1896
1897                 info->addr_source = SI_HARDCODED;
1898                 printk(KERN_INFO PFX "probing via hardcoded address\n");
1899
1900                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1901                         info->si_type = SI_KCS;
1902                 } else if (strcmp(si_type[i], "smic") == 0) {
1903                         info->si_type = SI_SMIC;
1904                 } else if (strcmp(si_type[i], "bt") == 0) {
1905                         info->si_type = SI_BT;
1906                 } else {
1907                         printk(KERN_WARNING PFX "Interface type specified "
1908                                "for interface %d was invalid: %s\n",
1909                                i, si_type[i]);
1910                         kfree(info);
1911                         continue;
1912                 }
1913
1914                 if (ports[i]) {
1915                         /* An I/O port */
1916                         info->io_setup = port_setup;
1917                         info->io.addr_data = ports[i];
1918                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1919                 } else if (addrs[i]) {
1920                         /* A memory port */
1921                         info->io_setup = mem_setup;
1922                         info->io.addr_data = addrs[i];
1923                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1924                 } else {
1925                         printk(KERN_WARNING PFX "Interface type specified "
1926                                "for interface %d, but port and address were "
1927                                "not set or set to zero.\n", i);
1928                         kfree(info);
1929                         continue;
1930                 }
1931
1932                 info->io.addr = NULL;
1933                 info->io.regspacing = regspacings[i];
1934                 if (!info->io.regspacing)
1935                         info->io.regspacing = DEFAULT_REGSPACING;
1936                 info->io.regsize = regsizes[i];
1937                 if (!info->io.regsize)
1938                         info->io.regsize = DEFAULT_REGSPACING;
1939                 info->io.regshift = regshifts[i];
1940                 info->irq = irqs[i];
1941                 if (info->irq)
1942                         info->irq_setup = std_irq_setup;
1943                 info->slave_addr = slave_addrs[i];
1944
1945                 if (!add_smi(info)) {
1946                         if (try_smi_init(info))
1947                                 cleanup_one_si(info);
1948                         ret = 0;
1949                 } else {
1950                         kfree(info);
1951                 }
1952         }
1953         return ret;
1954 }
1955
1956 #ifdef CONFIG_ACPI
1957
1958 #include <linux/acpi.h>
1959
1960 /*
1961  * Once we get an ACPI failure, we don't try any more, because we go
1962  * through the tables sequentially.  Once we don't find a table, there
1963  * are no more.
1964  */
1965 static int acpi_failure;
1966
1967 /* For GPE-type interrupts. */
1968 static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
1969         u32 gpe_number, void *context)
1970 {
1971         struct smi_info *smi_info = context;
1972         unsigned long   flags;
1973 #ifdef DEBUG_TIMING
1974         struct timeval t;
1975 #endif
1976
1977         spin_lock_irqsave(&(smi_info->si_lock), flags);
1978
1979         smi_inc_stat(smi_info, interrupts);
1980
1981 #ifdef DEBUG_TIMING
1982         do_gettimeofday(&t);
1983         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1984 #endif
1985         smi_event_handler(smi_info, 0);
1986         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1987
1988         return ACPI_INTERRUPT_HANDLED;
1989 }
1990
1991 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1992 {
1993         if (!info->irq)
1994                 return;
1995
1996         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1997 }
1998
1999 static int acpi_gpe_irq_setup(struct smi_info *info)
2000 {
2001         acpi_status status;
2002
2003         if (!info->irq)
2004                 return 0;
2005
2006         /* FIXME - is level triggered right? */
2007         status = acpi_install_gpe_handler(NULL,
2008                                           info->irq,
2009                                           ACPI_GPE_LEVEL_TRIGGERED,
2010                                           &ipmi_acpi_gpe,
2011                                           info);
2012         if (status != AE_OK) {
2013                 dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
2014                          " running polled\n", DEVICE_NAME, info->irq);
2015                 info->irq = 0;
2016                 return -EINVAL;
2017         } else {
2018                 info->irq_cleanup = acpi_gpe_irq_cleanup;
2019                 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
2020                 return 0;
2021         }
2022 }
2023
2024 /*
2025  * Defined at
2026  * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
2027  */
2028 struct SPMITable {
2029         s8      Signature[4];
2030         u32     Length;
2031         u8      Revision;
2032         u8      Checksum;
2033         s8      OEMID[6];
2034         s8      OEMTableID[8];
2035         s8      OEMRevision[4];
2036         s8      CreatorID[4];
2037         s8      CreatorRevision[4];
2038         u8      InterfaceType;
2039         u8      IPMIlegacy;
2040         s16     SpecificationRevision;
2041
2042         /*
2043          * Bit 0 - SCI interrupt supported
2044          * Bit 1 - I/O APIC/SAPIC
2045          */
2046         u8      InterruptType;
2047
2048         /*
2049          * If bit 0 of InterruptType is set, then this is the SCI
2050          * interrupt in the GPEx_STS register.
2051          */
2052         u8      GPE;
2053
2054         s16     Reserved;
2055
2056         /*
2057          * If bit 1 of InterruptType is set, then this is the I/O
2058          * APIC/SAPIC interrupt.
2059          */
2060         u32     GlobalSystemInterrupt;
2061
2062         /* The actual register address. */
2063         struct acpi_generic_address addr;
2064
2065         u8      UID[4];
2066
2067         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
2068 };
2069
2070 static int try_init_spmi(struct SPMITable *spmi)
2071 {
2072         struct smi_info  *info;
2073         int rv;
2074
2075         if (spmi->IPMIlegacy != 1) {
2076                 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
2077                 return -ENODEV;
2078         }
2079
2080         info = smi_info_alloc();
2081         if (!info) {
2082                 printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2083                 return -ENOMEM;
2084         }
2085
2086         info->addr_source = SI_SPMI;
2087         printk(KERN_INFO PFX "probing via SPMI\n");
2088
2089         /* Figure out the interface type. */
2090         switch (spmi->InterfaceType) {
2091         case 1: /* KCS */
2092                 info->si_type = SI_KCS;
2093                 break;
2094         case 2: /* SMIC */
2095                 info->si_type = SI_SMIC;
2096                 break;
2097         case 3: /* BT */
2098                 info->si_type = SI_BT;
2099                 break;
2100         default:
2101                 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2102                        spmi->InterfaceType);
2103                 kfree(info);
2104                 return -EIO;
2105         }
2106
2107         if (spmi->InterruptType & 1) {
2108                 /* We've got a GPE interrupt. */
2109                 info->irq = spmi->GPE;
2110                 info->irq_setup = acpi_gpe_irq_setup;
2111         } else if (spmi->InterruptType & 2) {
2112                 /* We've got an APIC/SAPIC interrupt. */
2113                 info->irq = spmi->GlobalSystemInterrupt;
2114                 info->irq_setup = std_irq_setup;
2115         } else {
2116                 /* Use the default interrupt setting. */
2117                 info->irq = 0;
2118                 info->irq_setup = NULL;
2119         }
2120
2121         if (spmi->addr.bit_width) {
2122                 /* A (hopefully) properly formed register bit width. */
2123                 info->io.regspacing = spmi->addr.bit_width / 8;
2124         } else {
2125                 info->io.regspacing = DEFAULT_REGSPACING;
2126         }
2127         info->io.regsize = info->io.regspacing;
2128         info->io.regshift = spmi->addr.bit_offset;
2129
2130         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
2131                 info->io_setup = mem_setup;
2132                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2133         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
2134                 info->io_setup = port_setup;
2135                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2136         } else {
2137                 kfree(info);
2138                 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2139                 return -EIO;
2140         }
2141         info->io.addr_data = spmi->addr.address;
2142
2143         pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
2144                  (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2145                  info->io.addr_data, info->io.regsize, info->io.regspacing,
2146                  info->irq);
2147
2148         rv = add_smi(info);
2149         if (rv)
2150                 kfree(info);
2151
2152         return rv;
2153 }
2154
2155 static void spmi_find_bmc(void)
2156 {
2157         acpi_status      status;
2158         struct SPMITable *spmi;
2159         int              i;
2160
2161         if (acpi_disabled)
2162                 return;
2163
2164         if (acpi_failure)
2165                 return;
2166
2167         for (i = 0; ; i++) {
2168                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
2169                                         (struct acpi_table_header **)&spmi);
2170                 if (status != AE_OK)
2171                         return;
2172
2173                 try_init_spmi(spmi);
2174         }
2175 }
2176
2177 static int ipmi_pnp_probe(struct pnp_dev *dev,
2178                                     const struct pnp_device_id *dev_id)
2179 {
2180         struct acpi_device *acpi_dev;
2181         struct smi_info *info;
2182         struct resource *res, *res_second;
2183         acpi_handle handle;
2184         acpi_status status;
2185         unsigned long long tmp;
2186         int rv;
2187
2188         acpi_dev = pnp_acpi_device(dev);
2189         if (!acpi_dev)
2190                 return -ENODEV;
2191
2192         info = smi_info_alloc();
2193         if (!info)
2194                 return -ENOMEM;
2195
2196         info->addr_source = SI_ACPI;
2197         printk(KERN_INFO PFX "probing via ACPI\n");
2198
2199         handle = acpi_dev->handle;
2200         info->addr_info.acpi_info.acpi_handle = handle;
2201
2202         /* _IFT tells us the interface type: KCS, BT, etc */
2203         status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
2204         if (ACPI_FAILURE(status))
2205                 goto err_free;
2206
2207         switch (tmp) {
2208         case 1:
2209                 info->si_type = SI_KCS;
2210                 break;
2211         case 2:
2212                 info->si_type = SI_SMIC;
2213                 break;
2214         case 3:
2215                 info->si_type = SI_BT;
2216                 break;
2217         default:
2218                 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2219                 goto err_free;
2220         }
2221
2222         res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2223         if (res) {
2224                 info->io_setup = port_setup;
2225                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2226         } else {
2227                 res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2228                 if (res) {
2229                         info->io_setup = mem_setup;
2230                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2231                 }
2232         }
2233         if (!res) {
2234                 dev_err(&dev->dev, "no I/O or memory address\n");
2235                 goto err_free;
2236         }
2237         info->io.addr_data = res->start;
2238
2239         info->io.regspacing = DEFAULT_REGSPACING;
2240         res_second = pnp_get_resource(dev,
2241                                (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2242                                         IORESOURCE_IO : IORESOURCE_MEM,
2243                                1);
2244         if (res_second) {
2245                 if (res_second->start > info->io.addr_data)
2246                         info->io.regspacing = res_second->start - info->io.addr_data;
2247         }
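        /*
         * For instance (hypothetical resources): if the first I/O
         * resource starts at 0xca2 and a second one starts at 0xca6,
         * the register spacing is taken to be 4 bytes.
         */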
2248         info->io.regsize = DEFAULT_REGSPACING;
2249         info->io.regshift = 0;
2250
2251         /* If _GPE exists, use it; otherwise use standard interrupts */
2252         status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
2253         if (ACPI_SUCCESS(status)) {
2254                 info->irq = tmp;
2255                 info->irq_setup = acpi_gpe_irq_setup;
2256         } else if (pnp_irq_valid(dev, 0)) {
2257                 info->irq = pnp_irq(dev, 0);
2258                 info->irq_setup = std_irq_setup;
2259         }
2260
2261         info->dev = &dev->dev;
2262         pnp_set_drvdata(dev, info);
2263
2264         dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2265                  res, info->io.regsize, info->io.regspacing,
2266                  info->irq);
2267
2268         rv = add_smi(info);
2269         if (rv)
2270                 kfree(info);
2271
2272         return rv;
2273
2274 err_free:
2275         kfree(info);
2276         return -EINVAL;
2277 }
2278
2279 static void ipmi_pnp_remove(struct pnp_dev *dev)
2280 {
2281         struct smi_info *info = pnp_get_drvdata(dev);
2282
2283         cleanup_one_si(info);
2284 }
2285
2286 static const struct pnp_device_id pnp_dev_table[] = {
2287         {"IPI0001", 0},
2288         {"", 0},
2289 };
2290
2291 static struct pnp_driver ipmi_pnp_driver = {
2292         .name           = DEVICE_NAME,
2293         .probe          = ipmi_pnp_probe,
2294         .remove         = ipmi_pnp_remove,
2295         .id_table       = pnp_dev_table,
2296 };
2297
2298 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
2299 #endif
2300
2301 #ifdef CONFIG_DMI
2302 struct dmi_ipmi_data {
2303         u8              type;
2304         u8              addr_space;
2305         unsigned long   base_addr;
2306         u8              irq;
2307         u8              offset;
2308         u8              slave_addr;
2309 };
2310
2311 static int decode_dmi(const struct dmi_header *dm,
2312                                 struct dmi_ipmi_data *dmi)
2313 {
2314         const u8        *data = (const u8 *)dm;
2315         unsigned long   base_addr;
2316         u8              reg_spacing;
2317         u8              len = dm->length;
2318
2319         dmi->type = data[4];
2320
2321         memcpy(&base_addr, data+8, sizeof(unsigned long));
2322         if (len >= 0x11) {
2323                 if (base_addr & 1) {
2324                         /* I/O */
2325                         base_addr &= 0xFFFE;
2326                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
2327                 } else
2328                         /* Memory */
2329                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
2330
2331                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
2332                    is odd. */
2333                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
2334
2335                 dmi->irq = data[0x11];
2336
2337                 /* The top two bits of byte 0x10 hold the register spacing. */
2338                 reg_spacing = (data[0x10] & 0xC0) >> 6;
2339                 switch (reg_spacing) {
2340                 case 0x00: /* Byte boundaries */
2341                     dmi->offset = 1;
2342                     break;
2343                 case 0x01: /* 32-bit boundaries */
2344                     dmi->offset = 4;
2345                     break;
2346                 case 0x02: /* 16-byte boundaries */
2347                     dmi->offset = 16;
2348                     break;
2349                 default:
2350                     /* Some other interface, just ignore it. */
2351                     return -EIO;
2352                 }
2353         } else {
2354                 /* Old DMI spec. */
2355                 /*
2356                  * Note that technically, the lower bit of the base
2357                  * address should be 1 if the address is I/O and 0 if
2358                  * the address is in memory.  So many systems get that
2359                  * wrong (and all that I have seen are I/O) so we just
2360                  * ignore that bit and assume I/O.  Systems that use
2361                  * memory should use the newer spec, anyway.
2362                  */
2363                 dmi->base_addr = base_addr & 0xfffe;
2364                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2365                 dmi->offset = 1;
2366         }
2367
2368         dmi->slave_addr = data[6];
2369
2370         return 0;
2371 }
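/*
 * A sketch of how a (hypothetical) SMBIOS type-38 record is decoded by
 * the function above: a base-address field of 0x0ca3 has bit 0 set, so
 * the interface is in I/O space and the address is masked to 0x0ca2; if
 * byte 0x10 were 0x50, bit 4 would restore the lsb (0x0ca2 | 1 = 0x0ca3)
 * and the top two bits (0x01) would select 32-bit register boundaries,
 * i.e. an offset of 4.
 */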
2372
2373 static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2374 {
2375         struct smi_info *info;
2376
2377         info = smi_info_alloc();
2378         if (!info) {
2379                 printk(KERN_ERR PFX "Could not allocate SI data\n");
2380                 return;
2381         }
2382
2383         info->addr_source = SI_SMBIOS;
2384         printk(KERN_INFO PFX "probing via SMBIOS\n");
2385
2386         switch (ipmi_data->type) {
2387         case 0x01: /* KCS */
2388                 info->si_type = SI_KCS;
2389                 break;
2390         case 0x02: /* SMIC */
2391                 info->si_type = SI_SMIC;
2392                 break;
2393         case 0x03: /* BT */
2394                 info->si_type = SI_BT;
2395                 break;
2396         default:
2397                 kfree(info);
2398                 return;
2399         }
2400
2401         switch (ipmi_data->addr_space) {
2402         case IPMI_MEM_ADDR_SPACE:
2403                 info->io_setup = mem_setup;
2404                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2405                 break;
2406
2407         case IPMI_IO_ADDR_SPACE:
2408                 info->io_setup = port_setup;
2409                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2410                 break;
2411
2412         default:
2413                 kfree(info);
2414                 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2415                        ipmi_data->addr_space);
2416                 return;
2417         }
2418         info->io.addr_data = ipmi_data->base_addr;
2419
2420         info->io.regspacing = ipmi_data->offset;
2421         if (!info->io.regspacing)
2422                 info->io.regspacing = DEFAULT_REGSPACING;
2423         info->io.regsize = DEFAULT_REGSPACING;
2424         info->io.regshift = 0;
2425
2426         info->slave_addr = ipmi_data->slave_addr;
2427
2428         info->irq = ipmi_data->irq;
2429         if (info->irq)
2430                 info->irq_setup = std_irq_setup;
2431
2432         pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
2433                  (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2434                  info->io.addr_data, info->io.regsize, info->io.regspacing,
2435                  info->irq);
2436
2437         if (add_smi(info))
2438                 kfree(info);
2439 }
2440
2441 static void dmi_find_bmc(void)
2442 {
2443         const struct dmi_device *dev = NULL;
2444         struct dmi_ipmi_data data;
2445         int                  rv;
2446
2447         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2448                 memset(&data, 0, sizeof(data));
2449                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2450                                 &data);
2451                 if (!rv)
2452                         try_init_dmi(&data);
2453         }
2454 }
2455 #endif /* CONFIG_DMI */
2456
2457 #ifdef CONFIG_PCI
2458
2459 #define PCI_ERMC_CLASSCODE              0x0C0700
2460 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2461 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2462 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2463 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2464 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2465
2466 #define PCI_HP_VENDOR_ID    0x103C
2467 #define PCI_MMC_DEVICE_ID   0x121A
2468 #define PCI_MMC_ADDR_CW     0x10
2469
2470 static void ipmi_pci_cleanup(struct smi_info *info)
2471 {
2472         struct pci_dev *pdev = info->addr_source_data;
2473
2474         pci_disable_device(pdev);
2475 }
2476
2477 static int ipmi_pci_probe_regspacing(struct smi_info *info)
2478 {
2479         if (info->si_type == SI_KCS) {
2480                 unsigned char   status;
2481                 int             regspacing;
2482
2483                 info->io.regsize = DEFAULT_REGSIZE;
2484                 info->io.regshift = 0;
2485                 info->io_size = 2;
2486                 info->handlers = &kcs_smi_handlers;
2487
2488                 /* detect 1, 4, 16byte spacing */
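                /*
                 * The heuristic used below: at the correct spacing,
                 * offset 1 addresses the KCS status/command register,
                 * so writing a dummy command and reading back a
                 * non-zero status suggests a live register, while a
                 * zero read moves on to the next candidate spacing
                 * (1, then 4, then 16).
                 */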
2489                 for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
2490                         info->io.regspacing = regspacing;
2491                         if (info->io_setup(info)) {
2492                                 dev_err(info->dev,
2493                                         "Could not setup I/O space\n");
2494                                 return DEFAULT_REGSPACING;
2495                         }
2496                         /* write invalid cmd */
2497                         info->io.outputb(&info->io, 1, 0x10);
2498                         /* read status back */
2499                         status = info->io.inputb(&info->io, 1);
2500                         info->io_cleanup(info);
2501                         if (status)
2502                                 return regspacing;
2503                         regspacing *= 4;
2504                 }
2505         }
2506         return DEFAULT_REGSPACING;
2507 }
2508
2509 static int ipmi_pci_probe(struct pci_dev *pdev,
2510                                     const struct pci_device_id *ent)
2511 {
2512         int rv;
2513         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2514         struct smi_info *info;
2515
2516         info = smi_info_alloc();
2517         if (!info)
2518                 return -ENOMEM;
2519
2520         info->addr_source = SI_PCI;
2521         dev_info(&pdev->dev, "probing via PCI");
2522
2523         switch (class_type) {
2524         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2525                 info->si_type = SI_SMIC;
2526                 break;
2527
2528         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2529                 info->si_type = SI_KCS;
2530                 break;
2531
2532         case PCI_ERMC_CLASSCODE_TYPE_BT:
2533                 info->si_type = SI_BT;
2534                 break;
2535
2536         default:
2537                 kfree(info);
2538                 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2539                 return -ENOMEM;
2540         }
2541
2542         rv = pci_enable_device(pdev);
2543         if (rv) {
2544                 dev_err(&pdev->dev, "couldn't enable PCI device\n");
2545                 kfree(info);
2546                 return rv;
2547         }
2548
2549         info->addr_source_cleanup = ipmi_pci_cleanup;
2550         info->addr_source_data = pdev;
2551
2552         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2553                 info->io_setup = port_setup;
2554                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2555         } else {
2556                 info->io_setup = mem_setup;
2557                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2558         }
2559         info->io.addr_data = pci_resource_start(pdev, 0);
2560
2561         info->io.regspacing = ipmi_pci_probe_regspacing(info);
2562         info->io.regsize = DEFAULT_REGSIZE;
2563         info->io.regshift = 0;
2564
2565         info->irq = pdev->irq;
2566         if (info->irq)
2567                 info->irq_setup = std_irq_setup;
2568
2569         info->dev = &pdev->dev;
2570         pci_set_drvdata(pdev, info);
2571
2572         dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2573                 &pdev->resource[0], info->io.regsize, info->io.regspacing,
2574                 info->irq);
2575
2576         rv = add_smi(info);
2577         if (rv) {
2578                 kfree(info);
2579                 pci_disable_device(pdev);
2580         }
2581
2582         return rv;
2583 }
2584
2585 static void ipmi_pci_remove(struct pci_dev *pdev)
2586 {
2587         struct smi_info *info = pci_get_drvdata(pdev);
2588         cleanup_one_si(info);
2589         pci_disable_device(pdev);
2590 }
2591
2592 static struct pci_device_id ipmi_pci_devices[] = {
2593         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2594         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2595         { 0, }
2596 };
2597 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2598
2599 static struct pci_driver ipmi_pci_driver = {
2600         .name =         DEVICE_NAME,
2601         .id_table =     ipmi_pci_devices,
2602         .probe =        ipmi_pci_probe,
2603         .remove =       ipmi_pci_remove,
2604 };
2605 #endif /* CONFIG_PCI */
2606
2607 static struct of_device_id ipmi_match[];
2608 static int ipmi_probe(struct platform_device *dev)
2609 {
2610 #ifdef CONFIG_OF
2611         const struct of_device_id *match;
2612         struct smi_info *info;
2613         struct resource resource;
2614         const __be32 *regsize, *regspacing, *regshift;
2615         struct device_node *np = dev->dev.of_node;
2616         int ret;
2617         int proplen;
2618
2619         dev_info(&dev->dev, "probing via device tree\n");
2620
2621         match = of_match_device(ipmi_match, &dev->dev);
2622         if (!match)
2623                 return -EINVAL;
2624
2625         ret = of_address_to_resource(np, 0, &resource);
2626         if (ret) {
2627                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2628                 return ret;
2629         }
2630
2631         regsize = of_get_property(np, "reg-size", &proplen);
2632         if (regsize && proplen != 4) {
2633                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2634                 return -EINVAL;
2635         }
2636
2637         regspacing = of_get_property(np, "reg-spacing", &proplen);
2638         if (regspacing && proplen != 4) {
2639                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2640                 return -EINVAL;
2641         }
2642
2643         regshift = of_get_property(np, "reg-shift", &proplen);
2644         if (regshift && proplen != 4) {
2645                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2646                 return -EINVAL;
2647         }
2648
2649         info = smi_info_alloc();
2650
2651         if (!info) {
2652                 dev_err(&dev->dev,
2653                         "could not allocate memory for OF probe\n");
2654                 return -ENOMEM;
2655         }
2656
2657         info->si_type           = (enum si_type) match->data;
2658         info->addr_source       = SI_DEVICETREE;
2659         info->irq_setup         = std_irq_setup;
2660
2661         if (resource.flags & IORESOURCE_IO) {
2662                 info->io_setup          = port_setup;
2663                 info->io.addr_type      = IPMI_IO_ADDR_SPACE;
2664         } else {
2665                 info->io_setup          = mem_setup;
2666                 info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2667         }
2668
2669         info->io.addr_data      = resource.start;
2670
2671         info->io.regsize        = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
2672         info->io.regspacing     = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
2673         info->io.regshift       = regshift ? be32_to_cpup(regshift) : 0;
2674
2675         info->irq               = irq_of_parse_and_map(dev->dev.of_node, 0);
2676         info->dev               = &dev->dev;
2677
2678         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2679                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2680                 info->irq);
2681
2682         dev_set_drvdata(&dev->dev, info);
2683
2684         ret = add_smi(info);
2685         if (ret) {
2686                 kfree(info);
2687                 return ret;
2688         }
2689 #endif
2690         return 0;
2691 }
2692
2693 static int ipmi_remove(struct platform_device *dev)
2694 {
2695 #ifdef CONFIG_OF
2696         cleanup_one_si(dev_get_drvdata(&dev->dev));
2697 #endif
2698         return 0;
2699 }
2700
2701 static struct of_device_id ipmi_match[] =
2702 {
2703         { .type = "ipmi", .compatible = "ipmi-kcs",
2704           .data = (void *)(unsigned long) SI_KCS },
2705         { .type = "ipmi", .compatible = "ipmi-smic",
2706           .data = (void *)(unsigned long) SI_SMIC },
2707         { .type = "ipmi", .compatible = "ipmi-bt",
2708           .data = (void *)(unsigned long) SI_BT },
2709         {},
2710 };
2711
2712 static struct platform_driver ipmi_driver = {
2713         .driver = {
2714                 .name = DEVICE_NAME,
2715                 .owner = THIS_MODULE,
2716                 .of_match_table = ipmi_match,
2717         },
2718         .probe          = ipmi_probe,
2719         .remove         = ipmi_remove,
2720 };
2721
2722 #ifdef CONFIG_PARISC
2723 static int ipmi_parisc_probe(struct parisc_device *dev)
2724 {
2725         struct smi_info *info;
2726         int rv;
2727
2728         info = smi_info_alloc();
2729
2730         if (!info) {
2731                 dev_err(&dev->dev,
2732                         "could not allocate memory for PARISC probe\n");
2733                 return -ENOMEM;
2734         }
2735
2736         info->si_type           = SI_KCS;
2737         info->addr_source       = SI_DEVICETREE;
2738         info->io_setup          = mem_setup;
2739         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2740         info->io.addr_data      = dev->hpa.start;
2741         info->io.regsize        = 1;
2742         info->io.regspacing     = 1;
2743         info->io.regshift       = 0;
2744         info->irq               = 0; /* no interrupt */
2745         info->irq_setup         = NULL;
2746         info->dev               = &dev->dev;
2747
2748         dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
2749
2750         dev_set_drvdata(&dev->dev, info);
2751
2752         rv = add_smi(info);
2753         if (rv) {
2754                 kfree(info);
2755                 return rv;
2756         }
2757
2758         return 0;
2759 }
2760
2761 static int ipmi_parisc_remove(struct parisc_device *dev)
2762 {
2763         cleanup_one_si(dev_get_drvdata(&dev->dev));
2764         return 0;
2765 }
2766
2767 static struct parisc_device_id ipmi_parisc_tbl[] = {
2768         { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
2769         { 0, }
2770 };
2771
2772 static struct parisc_driver ipmi_parisc_driver = {
2773         .name =         "ipmi",
2774         .id_table =     ipmi_parisc_tbl,
2775         .probe =        ipmi_parisc_probe,
2776         .remove =       ipmi_parisc_remove,
2777 };
2778 #endif /* CONFIG_PARISC */
2779
2780 static int wait_for_msg_done(struct smi_info *smi_info)
2781 {
2782         enum si_sm_result     smi_result;
2783
2784         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2785         for (;;) {
2786                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2787                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2788                         schedule_timeout_uninterruptible(1);
2789                         smi_result = smi_info->handlers->event(
2790                                 smi_info->si_sm, jiffies_to_usecs(1));
2791                 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2792                         smi_result = smi_info->handlers->event(
2793                                 smi_info->si_sm, 0);
2794                 } else
2795                         break;
2796         }
2797         if (smi_result == SI_SM_HOSED)
2798                 /*
2799                  * We couldn't get the state machine to run, so whatever's at
2800                  * the port is probably not an IPMI SMI interface.
2801                  */
2802                 return -ENODEV;
2803
2804         return 0;
2805 }
2806
2807 static int try_get_dev_id(struct smi_info *smi_info)
2808 {
2809         unsigned char         msg[2];
2810         unsigned char         *resp;
2811         unsigned long         resp_len;
2812         int                   rv = 0;
2813
2814         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2815         if (!resp)
2816                 return -ENOMEM;
2817
2818         /*
2819          * Do a Get Device ID command, since it comes back with some
2820          * useful info.
2821          */
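        /*
         * Message framing note: msg[0] carries the network function in
         * its upper six bits (LUN 0 in the low two, hence the << 2) and
         * msg[1] is the command byte; the BMC replies with the odd
         * "response" NetFn, which is why response checks elsewhere in
         * this file compare against (IPMI_NETFN_APP_REQUEST | 1) << 2.
         */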
2822         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2823         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2824         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2825
2826         rv = wait_for_msg_done(smi_info);
2827         if (rv)
2828                 goto out;
2829
2830         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2831                                                   resp, IPMI_MAX_MSG_LENGTH);
2832
2833         /* Check and record info from the get device id, in case we need it. */
2834         rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2835
2836  out:
2837         kfree(resp);
2838         return rv;
2839 }
2840
2841 static int try_enable_event_buffer(struct smi_info *smi_info)
2842 {
2843         unsigned char         msg[3];
2844         unsigned char         *resp;
2845         unsigned long         resp_len;
2846         int                   rv = 0;
2847
2848         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2849         if (!resp)
2850                 return -ENOMEM;
2851
2852         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2853         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
2854         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2855
2856         rv = wait_for_msg_done(smi_info);
2857         if (rv) {
2858                 printk(KERN_WARNING PFX "Error getting response from get"
2859                        " global enables command, the event buffer is not"
2860                        " enabled.\n");
2861                 goto out;
2862         }
2863
2864         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2865                                                   resp, IPMI_MAX_MSG_LENGTH);
2866
2867         if (resp_len < 4 ||
2868                         resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2869                         resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
2870                         resp[2] != 0) {
2871                 printk(KERN_WARNING PFX "Invalid return from get global"
2872                        " enables command, cannot enable the event buffer.\n");
2873                 rv = -EINVAL;
2874                 goto out;
2875         }
2876
2877         if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
2878                 /* buffer is already enabled, nothing to do. */
2879                 goto out;
2880
2881         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2882         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
2883         msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
2884         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
2885
2886         rv = wait_for_msg_done(smi_info);
2887         if (rv) {
2888                 printk(KERN_WARNING PFX "Error getting response from set"
2889                        " global enables command, the event buffer is not"
2890                        " enabled.\n");
2891                 goto out;
2892         }
2893
2894         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2895                                                   resp, IPMI_MAX_MSG_LENGTH);
2896
2897         if (resp_len < 3 ||
2898                         resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2899                         resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
2900                 printk(KERN_WARNING PFX "Invalid return from set global"
2901                        " enables command, cannot enable the event buffer.\n");
2902                 rv = -EINVAL;
2903                 goto out;
2904         }
2905
2906         if (resp[2] != 0)
2907                 /*
2908                  * An error when setting the event buffer bit means
2909                  * that the event buffer is not supported.
2910                  */
2911                 rv = -ENOENT;
2912  out:
2913         kfree(resp);
2914         return rv;
2915 }
2916
2917 static int smi_type_proc_show(struct seq_file *m, void *v)
2918 {
2919         struct smi_info *smi = m->private;
2920
2921         return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2922 }
2923
2924 static int smi_type_proc_open(struct inode *inode, struct file *file)
2925 {
2926         return single_open(file, smi_type_proc_show, PDE_DATA(inode));
2927 }
2928
2929 static const struct file_operations smi_type_proc_ops = {
2930         .open           = smi_type_proc_open,
2931         .read           = seq_read,
2932         .llseek         = seq_lseek,
2933         .release        = single_release,
2934 };
2935
2936 static int smi_si_stats_proc_show(struct seq_file *m, void *v)
2937 {
2938         struct smi_info *smi = m->private;
2939
2940         seq_printf(m, "interrupts_enabled:    %d\n",
2941                        smi->irq && !smi->interrupt_disabled);
2942         seq_printf(m, "short_timeouts:        %u\n",
2943                        smi_get_stat(smi, short_timeouts));
2944         seq_printf(m, "long_timeouts:         %u\n",
2945                        smi_get_stat(smi, long_timeouts));
2946         seq_printf(m, "idles:                 %u\n",
2947                        smi_get_stat(smi, idles));
2948         seq_printf(m, "interrupts:            %u\n",
2949                        smi_get_stat(smi, interrupts));
2950         seq_printf(m, "attentions:            %u\n",
2951                        smi_get_stat(smi, attentions));
2952         seq_printf(m, "flag_fetches:          %u\n",
2953                        smi_get_stat(smi, flag_fetches));
2954         seq_printf(m, "hosed_count:           %u\n",
2955                        smi_get_stat(smi, hosed_count));
2956         seq_printf(m, "complete_transactions: %u\n",
2957                        smi_get_stat(smi, complete_transactions));
2958         seq_printf(m, "events:                %u\n",
2959                        smi_get_stat(smi, events));
2960         seq_printf(m, "watchdog_pretimeouts:  %u\n",
2961                        smi_get_stat(smi, watchdog_pretimeouts));
2962         seq_printf(m, "incoming_messages:     %u\n",
2963                        smi_get_stat(smi, incoming_messages));
2964         return 0;
2965 }
2966
2967 static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
2968 {
2969         return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
2970 }
2971
2972 static const struct file_operations smi_si_stats_proc_ops = {
2973         .open           = smi_si_stats_proc_open,
2974         .read           = seq_read,
2975         .llseek         = seq_lseek,
2976         .release        = single_release,
2977 };
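/*
 * Example (hedged): once the proc entries are registered by try_smi_init()
 * further down, these counters are typically readable from user space, e.g.
 *
 *   $ cat /proc/ipmi/0/si_stats
 *   interrupts_enabled:    1
 *   short_timeouts:        ...
 *
 * The exact path depends on where ipmi_smi_add_proc_entry() roots the
 * per-interface directory, so treat the path above as an assumption.
 */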
2978
2979 static int smi_params_proc_show(struct seq_file *m, void *v)
2980 {
2981         struct smi_info *smi = m->private;
2982
2983         return seq_printf(m,
2984                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2985                        si_to_str[smi->si_type],
2986                        addr_space_to_str[smi->io.addr_type],
2987                        smi->io.addr_data,
2988                        smi->io.regspacing,
2989                        smi->io.regsize,
2990                        smi->io.regshift,
2991                        smi->irq,
2992                        smi->slave_addr);
2993 }
2994
2995 static int smi_params_proc_open(struct inode *inode, struct file *file)
2996 {
2997         return single_open(file, smi_params_proc_show, PDE_DATA(inode));
2998 }
2999
3000 static const struct file_operations smi_params_proc_ops = {
3001         .open           = smi_params_proc_open,
3002         .read           = seq_read,
3003         .llseek         = seq_lseek,
3004         .release        = single_release,
3005 };
3006
3007 /*
3008  * oem_data_avail_to_receive_msg_avail
3009  * @info - smi_info structure with msg_flags set
3010  *
3011  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
3012  * Returns 1 indicating need to re-run handle_flags().
3013  */
3014 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
3015 {
3016         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
3017                                RECEIVE_MSG_AVAIL);
3018         return 1;
3019 }
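/*
 * Illustrative sketch (not compiled): assuming the message-flag bits
 * defined earlier in this file (RECEIVE_MSG_AVAIL at bit 0, the
 * OEM0/1/2_DATA_AVAIL bits at bits 5-7), the conversion above behaves
 * as follows.
 */
#if 0
static void oem_flag_conversion_example(void)
{
	/* msg_flags with only an OEM bit set, e.g. OEM0_DATA_AVAIL (0x20)... */
	unsigned char flags = OEM0_DATA_AVAIL;

	/* ...becomes RECEIVE_MSG_AVAIL (0x01) with all OEM bits cleared. */
	flags = (flags & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL;
	(void)flags;
}
#endif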
3020
3021 /*
3022  * setup_dell_poweredge_oem_data_handler
3023  * @info - smi_info.device_id must be populated
3024  *
3025  * Systems that match, but have firmware version < 1.40 may assert
3026  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
3027  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
3028  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
3029  * as RECEIVE_MSG_AVAIL instead.
3030  *
3031  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
3032  * asserts the OEM[012] bits, and if it did, the driver would have to
3033  * change to handle that properly, we don't actually check for the
3034  * firmware version.
3035  * Device ID = 0x20                BMC on PowerEdge 8G servers
3036  * Device Revision = 0x80
3037  * Firmware Revision1 = 0x01       BMC version 1.40
3038  * Firmware Revision2 = 0x40       BCD encoded
3039  * IPMI Version = 0x51             IPMI 1.5
3040  * Manufacturer ID = A2 02 00      Dell IANA
3041  *
3042  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
3043  * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
3044  *
3045  */
3046 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
3047 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
3048 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
3049 #define DELL_IANA_MFR_ID 0x0002a2
3050 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
3051 {
3052         struct ipmi_device_id *id = &smi_info->device_id;
3053         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
3054                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
3055                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
3056                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
3057                         smi_info->oem_data_avail_handler =
3058                                 oem_data_avail_to_receive_msg_avail;
3059                 } else if (ipmi_version_major(id) < 1 ||
3060                            (ipmi_version_major(id) == 1 &&
3061                             ipmi_version_minor(id) < 5)) {
3062                         smi_info->oem_data_avail_handler =
3063                                 oem_data_avail_to_receive_msg_avail;
3064                 }
3065         }
3066 }
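/*
 * Worked example (illustrative): on a matching PowerEdge 8G BMC the Get
 * Device ID fields listed in the comment above decode as
 *
 *   firmware rev1 = 0x01, rev2 = 0x40 (BCD)        ->  firmware 1.40
 *   ipmi_version  = 0x51 (BCD, low nibble = major) ->  IPMI 1.5, i.e.
 *      ipmi_version_major() == 1 and ipmi_version_minor() == 5
 *
 * so both branches above install the same OEM-flag handler; as the
 * comment notes, the firmware revision itself is deliberately ignored.
 */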
3067
3068 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
3069 static void return_hosed_msg_badsize(struct smi_info *smi_info)
3070 {
3071         struct ipmi_smi_msg *msg = smi_info->curr_msg;
3072
3073         /* Make it a response */
3074         msg->rsp[0] = msg->data[0] | 4;
3075         msg->rsp[1] = msg->data[1];
3076         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
3077         msg->rsp_size = 3;
3078         smi_info->curr_msg = NULL;
3079         deliver_recv_msg(smi_info, msg);
3080 }
3081
3082 /*
3083  * dell_poweredge_bt_xaction_handler
3084  * @info - smi_info.device_id must be populated
3085  *
3086  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
3087  * not respond to a Get SDR command if the length of the data
3088  * requested is exactly 0x3A, which leads to command timeouts and no
3089  * data returned.  This intercepts such commands, and causes userspace
3090  * callers to try again with a different-sized buffer, which succeeds.
3091  */
3092
3093 #define STORAGE_NETFN 0x0A
3094 #define STORAGE_CMD_GET_SDR 0x23
3095 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
3096                                              unsigned long unused,
3097                                              void *in)
3098 {
3099         struct smi_info *smi_info = in;
3100         unsigned char *data = smi_info->curr_msg->data;
3101         unsigned int size   = smi_info->curr_msg->data_size;
3102         if (size >= 8 &&
3103             (data[0]>>2) == STORAGE_NETFN &&
3104             data[1] == STORAGE_CMD_GET_SDR &&
3105             data[7] == 0x3A) {
3106                 return_hosed_msg_badsize(smi_info);
3107                 return NOTIFY_STOP;
3108         }
3109         return NOTIFY_DONE;
3110 }
3111
3112 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
3113         .notifier_call  = dell_poweredge_bt_xaction_handler,
3114 };
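/*
 * Illustrative request layout (assuming the usual framing used elsewhere
 * in this driver: data[0] = netfn << 2 | LUN, data[1] = cmd, payload
 * afterwards) for the Get SDR command being intercepted:
 *
 *   data[0] = STORAGE_NETFN << 2     storage netfn, LUN 0
 *   data[1] = STORAGE_CMD_GET_SDR
 *   data[2..3]                       reservation ID
 *   data[4..5]                       record ID
 *   data[6]                          offset into the record
 *   data[7]                          bytes to read -- 0x3A triggers the bug
 *
 * return_hosed_msg_badsize() then fakes a response with rsp[0] =
 * data[0] | 4 (the response netfn bit) and completion code 0xCA, so the
 * user-space caller retries with a different length.
 */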
3115
3116 /*
3117  * setup_dell_poweredge_bt_xaction_handler
3118  * @info - smi_info.device_id must be filled in already
3119  *
3120  * Fills in smi_info.device_id.start_transaction_pre_hook
3121  * when we know what function to use there.
3122  */
3123 static void
3124 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
3125 {
3126         struct ipmi_device_id *id = &smi_info->device_id;
3127         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
3128             smi_info->si_type == SI_BT)
3129                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
3130 }
3131
3132 /*
3133  * setup_oem_data_handler
3134  * @info - smi_info.device_id must be filled in already
3135  *
3136  * Fills in smi_info.device_id.oem_data_available_handler
3137  * when we know what function to use there.
3138  */
3139
3140 static void setup_oem_data_handler(struct smi_info *smi_info)
3141 {
3142         setup_dell_poweredge_oem_data_handler(smi_info);
3143 }
3144
3145 static void setup_xaction_handlers(struct smi_info *smi_info)
3146 {
3147         setup_dell_poweredge_bt_xaction_handler(smi_info);
3148 }
3149
3150 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
3151 {
3152         if (smi_info->intf) {
3153                 /*
3154                  * The timer and thread are only running if the
3155                  * interface has been started up and registered.
3156                  */
3157                 if (smi_info->thread != NULL)
3158                         kthread_stop(smi_info->thread);
3159                 del_timer_sync(&smi_info->si_timer);
3160         }
3161 }
3162
3163 static struct ipmi_default_vals
3164 {
3165         int type;
3166         int port;
3167 } ipmi_defaults[] =
3168 {
3169         { .type = SI_KCS, .port = 0xca2 },
3170         { .type = SI_SMIC, .port = 0xca9 },
3171         { .type = SI_BT, .port = 0xe4 },
3172         { .port = 0 }
3173 };
3174
3175 static void default_find_bmc(void)
3176 {
3177         struct smi_info *info;
3178         int             i;
3179
3180         for (i = 0; ; i++) {
3181                 if (!ipmi_defaults[i].port)
3182                         break;
3183 #ifdef CONFIG_PPC
3184                 if (check_legacy_ioport(ipmi_defaults[i].port))
3185                         continue;
3186 #endif
3187                 info = smi_info_alloc();
3188                 if (!info)
3189                         return;
3190
3191                 info->addr_source = SI_DEFAULT;
3192
3193                 info->si_type = ipmi_defaults[i].type;
3194                 info->io_setup = port_setup;
3195                 info->io.addr_data = ipmi_defaults[i].port;
3196                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
3197
3198                 info->io.addr = NULL;
3199                 info->io.regspacing = DEFAULT_REGSPACING;
3200                 info->io.regsize = DEFAULT_REGSPACING;
3201                 info->io.regshift = 0;
3202
3203                 if (add_smi(info) == 0) {
3204                         if ((try_smi_init(info)) == 0) {
3205                                 /* Found one... */
3206                                 printk(KERN_INFO PFX "Found default %s"
3207                                 " state machine at %s address 0x%lx\n",
3208                                 si_to_str[info->si_type],
3209                                 addr_space_to_str[info->io.addr_type],
3210                                 info->io.addr_data);
3211                         } else
3212                                 cleanup_one_si(info);
3213                 } else {
3214                         kfree(info);
3215                 }
3216         }
3217 }
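/*
 * Example (hedged): rather than relying on these legacy default ports, an
 * interface can be described explicitly through the module parameters
 * consumed by hardcode_find_bmc(), for instance
 *
 *   modprobe ipmi_si type=kcs ports=0xca2 regspacings=1
 *
 * Parameter names and semantics are documented in Documentation/IPMI.txt;
 * the line above is only a sketch of that interface.
 */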
3218
3219 static int is_new_interface(struct smi_info *info)
3220 {
3221         struct smi_info *e;
3222
3223         list_for_each_entry(e, &smi_infos, link) {
3224                 if (e->io.addr_type != info->io.addr_type)
3225                         continue;
3226                 if (e->io.addr_data == info->io.addr_data)
3227                         return 0;
3228         }
3229
3230         return 1;
3231 }
3232
3233 static int add_smi(struct smi_info *new_smi)
3234 {
3235         int rv = 0;
3236
3237         printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3238                         ipmi_addr_src_to_str[new_smi->addr_source],
3239                         si_to_str[new_smi->si_type]);
3240         mutex_lock(&smi_infos_lock);
3241         if (!is_new_interface(new_smi)) {
3242                 printk(KERN_CONT " duplicate interface\n");
3243                 rv = -EBUSY;
3244                 goto out_err;
3245         }
3246
3247         printk(KERN_CONT "\n");
3248
3249         /* So we know not to free it unless we have allocated one. */
3250         new_smi->intf = NULL;
3251         new_smi->si_sm = NULL;
3252         new_smi->handlers = NULL;
3253
3254         list_add_tail(&new_smi->link, &smi_infos);
3255
3256 out_err:
3257         mutex_unlock(&smi_infos_lock);
3258         return rv;
3259 }
3260
3261 static int try_smi_init(struct smi_info *new_smi)
3262 {
3263         int rv = 0;
3264         int i;
3265
3266         printk(KERN_INFO PFX "Trying %s-specified %s state"
3267                " machine at %s address 0x%lx, slave address 0x%x,"
3268                " irq %d\n",
3269                ipmi_addr_src_to_str[new_smi->addr_source],
3270                si_to_str[new_smi->si_type],
3271                addr_space_to_str[new_smi->io.addr_type],
3272                new_smi->io.addr_data,
3273                new_smi->slave_addr, new_smi->irq);
3274
3275         switch (new_smi->si_type) {
3276         case SI_KCS:
3277                 new_smi->handlers = &kcs_smi_handlers;
3278                 break;
3279
3280         case SI_SMIC:
3281                 new_smi->handlers = &smic_smi_handlers;
3282                 break;
3283
3284         case SI_BT:
3285                 new_smi->handlers = &bt_smi_handlers;
3286                 break;
3287
3288         default:
3289                 /* No support for anything else yet. */
3290                 rv = -EIO;
3291                 goto out_err;
3292         }
3293
3294         /* Allocate the state machine's data and initialize it. */
3295         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3296         if (!new_smi->si_sm) {
3297                 printk(KERN_ERR PFX
3298                        "Could not allocate state machine memory\n");
3299                 rv = -ENOMEM;
3300                 goto out_err;
3301         }
3302         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
3303                                                         &new_smi->io);
3304
3305         /* Now that we know the I/O size, we can set up the I/O. */
3306         rv = new_smi->io_setup(new_smi);
3307         if (rv) {
3308                 printk(KERN_ERR PFX "Could not set up I/O space\n");
3309                 goto out_err;
3310         }
3311
3312         /* Do low-level detection first. */
3313         if (new_smi->handlers->detect(new_smi->si_sm)) {
3314                 if (new_smi->addr_source)
3315                         printk(KERN_INFO PFX "Interface detection failed\n");
3316                 rv = -ENODEV;
3317                 goto out_err;
3318         }
3319
3320         /*
3321          * Attempt a get device id command.  If it fails, we probably
3322          * don't have a BMC here.
3323          */
3324         rv = try_get_dev_id(new_smi);
3325         if (rv) {
3326                 if (new_smi->addr_source)
3327                         printk(KERN_INFO PFX "There appears to be no BMC"
3328                                " at this location\n");
3329                 goto out_err;
3330         }
3331
3332         setup_oem_data_handler(new_smi);
3333         setup_xaction_handlers(new_smi);
3334
3335         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
3336         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
3337         new_smi->curr_msg = NULL;
3338         atomic_set(&new_smi->req_events, 0);
3339         new_smi->run_to_completion = 0;
3340         for (i = 0; i < SI_NUM_STATS; i++)
3341                 atomic_set(&new_smi->stats[i], 0);
3342
3343         new_smi->interrupt_disabled = 1;
3344         atomic_set(&new_smi->stop_operation, 0);
3345         new_smi->intf_num = smi_num;
3346         smi_num++;
3347
3348         rv = try_enable_event_buffer(new_smi);
3349         if (rv == 0)
3350                 new_smi->has_event_buffer = 1;
3351
3352         /*
3353          * Start clearing the flags before we enable interrupts or the
3354          * timer to avoid racing with the timer.
3355          */
3356         start_clear_flags(new_smi);
3357         /* IRQ is defined to be set when non-zero. */
3358         if (new_smi->irq)
3359                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
3360
3361         if (!new_smi->dev) {
3362                 /*
3363                  * If we don't already have a device from something
3364                  * else (like PCI), then register a new one.
3365                  */
3366                 new_smi->pdev = platform_device_alloc("ipmi_si",
3367                                                       new_smi->intf_num);
3368                 if (!new_smi->pdev) {
3369                         printk(KERN_ERR PFX
3370                                "Unable to allocate platform device\n");
3371                         goto out_err;
3372                 }
3373                 new_smi->dev = &new_smi->pdev->dev;
3374                 new_smi->dev->driver = &ipmi_driver.driver;
3375
3376                 rv = platform_device_add(new_smi->pdev);
3377                 if (rv) {
3378                         printk(KERN_ERR PFX
3379                                "Unable to register system interface device:"
3380                                " %d\n",
3381                                rv);
3382                         goto out_err;
3383                 }
3384                 new_smi->dev_registered = 1;
3385         }
3386
3387         rv = ipmi_register_smi(&handlers,
3388                                new_smi,
3389                                &new_smi->device_id,
3390                                new_smi->dev,
3391                                "bmc",
3392                                new_smi->slave_addr);
3393         if (rv) {
3394                 dev_err(new_smi->dev, "Unable to register device: error %d\n",
3395                         rv);
3396                 goto out_err_stop_timer;
3397         }
3398
3399         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
3400                                      &smi_type_proc_ops,
3401                                      new_smi);
3402         if (rv) {
3403                 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3404                 goto out_err_stop_timer;
3405         }
3406
3407         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
3408                                      &smi_si_stats_proc_ops,
3409                                      new_smi);
3410         if (rv) {
3411                 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3412                 goto out_err_stop_timer;
3413         }
3414
3415         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
3416                                      &smi_params_proc_ops,
3417                                      new_smi);
3418         if (rv) {
3419                 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3420                 goto out_err_stop_timer;
3421         }
3422
3423         dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3424                  si_to_str[new_smi->si_type]);
3425
3426         return 0;
3427
3428  out_err_stop_timer:
3429         atomic_inc(&new_smi->stop_operation);
3430         wait_for_timer_and_thread(new_smi);
3431
3432  out_err:
3433         new_smi->interrupt_disabled = 1;
3434
3435         if (new_smi->intf) {
3436                 ipmi_unregister_smi(new_smi->intf);
3437                 new_smi->intf = NULL;
3438         }
3439
3440         if (new_smi->irq_cleanup) {
3441                 new_smi->irq_cleanup(new_smi);
3442                 new_smi->irq_cleanup = NULL;
3443         }
3444
3445         /*
3446          * Wait until we know that any interrupt handlers that might
3447          * have been running before we freed the interrupt have
3448          * finished.
3449          */
3450         synchronize_sched();
3451
3452         if (new_smi->si_sm) {
3453                 if (new_smi->handlers)
3454                         new_smi->handlers->cleanup(new_smi->si_sm);
3455                 kfree(new_smi->si_sm);
3456                 new_smi->si_sm = NULL;
3457         }
3458         if (new_smi->addr_source_cleanup) {
3459                 new_smi->addr_source_cleanup(new_smi);
3460                 new_smi->addr_source_cleanup = NULL;
3461         }
3462         if (new_smi->io_cleanup) {
3463                 new_smi->io_cleanup(new_smi);
3464                 new_smi->io_cleanup = NULL;
3465         }
3466
3467         if (new_smi->dev_registered) {
3468                 platform_device_unregister(new_smi->pdev);
3469                 new_smi->dev_registered = 0;
3470         }
3471
3472         return rv;
3473 }
3474
3475 static int init_ipmi_si(void)
3476 {
3477         int  i;
3478         char *str;
3479         int  rv;
3480         struct smi_info *e;
3481         enum ipmi_addr_src type = SI_INVALID;
3482
3483         if (initialized)
3484                 return 0;
3485         initialized = 1;
3486
3487         if (si_tryplatform) {
3488                 rv = platform_driver_register(&ipmi_driver);
3489                 if (rv) {
3490                         printk(KERN_ERR PFX "Unable to register "
3491                                "driver: %d\n", rv);
3492                         return rv;
3493                 }
3494         }
3495
3496         /* Parse out the si_type string into its components. */
3497         str = si_type_str;
3498         if (*str != '\0') {
3499                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
3500                         si_type[i] = str;
3501                         str = strchr(str, ',');
3502                         if (str) {
3503                                 *str = '\0';
3504                                 str++;
3505                         } else {
3506                                 break;
3507                         }
3508                 }
3509         }
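        /*
         * Worked example (illustrative): with a module parameter such as
         * type="kcs,bt" (module_param_string() binds "type" to si_type_str),
         * the loop above splits the string in place so that si_type[0] =
         * "kcs" and si_type[1] = "bt"; hardcode_find_bmc() below is then
         * expected to pair each entry with the matching ports[]/addrs[]
         * value.
         */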
3510
3511         printk(KERN_INFO "IPMI System Interface driver.\n");
3512
3513         /* If the user gave us a device, they presumably want us to use it */
3514         if (!hardcode_find_bmc())
3515                 return 0;
3516
3517 #ifdef CONFIG_PCI
3518         if (si_trypci) {
3519                 rv = pci_register_driver(&ipmi_pci_driver);
3520                 if (rv)
3521                         printk(KERN_ERR PFX "Unable to register "
3522                                "PCI driver: %d\n", rv);
3523                 else
3524                         pci_registered = 1;
3525         }
3526 #endif
3527
3528 #ifdef CONFIG_ACPI
3529         if (si_tryacpi) {
3530                 pnp_register_driver(&ipmi_pnp_driver);
3531                 pnp_registered = 1;
3532         }
3533 #endif
3534
3535 #ifdef CONFIG_DMI
3536         if (si_trydmi)
3537                 dmi_find_bmc();
3538 #endif
3539
3540 #ifdef CONFIG_ACPI
3541         if (si_tryacpi)
3542                 spmi_find_bmc();
3543 #endif
3544
3545 #ifdef CONFIG_PARISC
3546         register_parisc_driver(&ipmi_parisc_driver);
3547         parisc_registered = 1;
3548         /* Poking PC I/O addresses will crash the machine, don't do it. */
3549         si_trydefaults = 0;
3550 #endif
3551
3552         /* We prefer devices with interrupts, but in the case of a machine
3553            with multiple BMCs we assume that there will be several instances
3554            of a given type, so if we succeed in registering one type, we also
3555            try to register everything else of the same type. */
3556
3557         mutex_lock(&smi_infos_lock);
3558         list_for_each_entry(e, &smi_infos, link) {
3559                 /* Try to register a device if it has an IRQ and we either
3560                    haven't successfully registered a device yet or this
3561                    device has the same type as one we successfully registered */
3562                 if (e->irq && (!type || e->addr_source == type)) {
3563                         if (!try_smi_init(e)) {
3564                                 type = e->addr_source;
3565                         }
3566                 }
3567         }
3568
3569         /* type will only have been set if we successfully registered an si */
3570         if (type) {
3571                 mutex_unlock(&smi_infos_lock);
3572                 return 0;
3573         }
3574
3575         /* Fall back to devices without interrupts. */
3576
3577         list_for_each_entry(e, &smi_infos, link) {
3578                 if (!e->irq && (!type || e->addr_source == type)) {
3579                         if (!try_smi_init(e)) {
3580                                 type = e->addr_source;
3581                         }
3582                 }
3583         }
3584         mutex_unlock(&smi_infos_lock);
3585
3586         if (type)
3587                 return 0;
3588
3589         if (si_trydefaults) {
3590                 mutex_lock(&smi_infos_lock);
3591                 if (list_empty(&smi_infos)) {
3592                         /* No BMC was found, try defaults. */
3593                         mutex_unlock(&smi_infos_lock);
3594                         default_find_bmc();
3595                 } else
3596                         mutex_unlock(&smi_infos_lock);
3597         }
3598
3599         mutex_lock(&smi_infos_lock);
3600         if (unload_when_empty && list_empty(&smi_infos)) {
3601                 mutex_unlock(&smi_infos_lock);
3602                 cleanup_ipmi_si();
3603                 printk(KERN_WARNING PFX
3604                        "Unable to find any System Interface(s)\n");
3605                 return -ENODEV;
3606         } else {
3607                 mutex_unlock(&smi_infos_lock);
3608                 return 0;
3609         }
3610 }
3611 module_init(init_ipmi_si);
3612
3613 static void cleanup_one_si(struct smi_info *to_clean)
3614 {
3615         int           rv = 0;
3616         unsigned long flags;
3617
3618         if (!to_clean)
3619                 return;
3620
3621         list_del(&to_clean->link);
3622
3623         /* Tell the driver that we are shutting down. */
3624         atomic_inc(&to_clean->stop_operation);
3625
3626         /*
3627          * Make sure the timer and thread are stopped and will not run
3628          * again.
3629          */
3630         wait_for_timer_and_thread(to_clean);
3631
3632         /*
3633          * Timeouts are stopped, now make sure the interrupts are off
3634          * for the device.  A little tricky with locks to make sure
3635          * there are no races.
3636          */
3637         spin_lock_irqsave(&to_clean->si_lock, flags);
3638         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3639                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3640                 poll(to_clean);
3641                 schedule_timeout_uninterruptible(1);
3642                 spin_lock_irqsave(&to_clean->si_lock, flags);
3643         }
3644         disable_si_irq(to_clean);
3645         spin_unlock_irqrestore(&to_clean->si_lock, flags);
3646         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3647                 poll(to_clean);
3648                 schedule_timeout_uninterruptible(1);
3649         }
3650
3651         /* Clean up interrupts and make sure that everything is done. */
3652         if (to_clean->irq_cleanup)
3653                 to_clean->irq_cleanup(to_clean);
3654         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3655                 poll(to_clean);
3656                 schedule_timeout_uninterruptible(1);
3657         }
3658
3659         if (to_clean->intf)
3660                 rv = ipmi_unregister_smi(to_clean->intf);
3661
3662         if (rv) {
3663                 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3664                        rv);
3665         }
3666
3667         if (to_clean->handlers)
3668                 to_clean->handlers->cleanup(to_clean->si_sm);
3669
3670         kfree(to_clean->si_sm);
3671
3672         if (to_clean->addr_source_cleanup)
3673                 to_clean->addr_source_cleanup(to_clean);
3674         if (to_clean->io_cleanup)
3675                 to_clean->io_cleanup(to_clean);
3676
3677         if (to_clean->dev_registered)
3678                 platform_device_unregister(to_clean->pdev);
3679
3680         kfree(to_clean);
3681 }
3682
3683 static void cleanup_ipmi_si(void)
3684 {
3685         struct smi_info *e, *tmp_e;
3686
3687         if (!initialized)
3688                 return;
3689
3690 #ifdef CONFIG_PCI
3691         if (pci_registered)
3692                 pci_unregister_driver(&ipmi_pci_driver);
3693 #endif
3694 #ifdef CONFIG_ACPI
3695         if (pnp_registered)
3696                 pnp_unregister_driver(&ipmi_pnp_driver);
3697 #endif
3698 #ifdef CONFIG_PARISC
3699         if (parisc_registered)
3700                 unregister_parisc_driver(&ipmi_parisc_driver);
3701 #endif
3702
3703         platform_driver_unregister(&ipmi_driver);
3704
3705         mutex_lock(&smi_infos_lock);
3706         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3707                 cleanup_one_si(e);
3708         mutex_unlock(&smi_infos_lock);
3709 }
3710 module_exit(cleanup_ipmi_si);
3711
3712 MODULE_LICENSE("GPL");
3713 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3714 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
3715                    " system interfaces.");