2 * PowerNV OPAL high level interfaces
4 * Copyright 2011 IBM Corp.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
14 #include <linux/types.h>
16 #include <linux/of_platform.h>
17 #include <linux/interrupt.h>
18 #include <linux/notifier.h>
19 #include <linux/slab.h>
21 #include <asm/firmware.h>
/*
 * File-scope state for the OPAL interface layer.
 * NOTE(review): the embedded line numbering shows gaps — several original
 * lines are missing from this excerpt.
 */
30 static struct device_node *opal_node;	/* cached /ibm,opal DT node, set in opal_init() */
31 static DEFINE_SPINLOCK(opal_write_lock);	/* serializes console writes in opal_put_chars() */
32 extern u64 opal_mc_secondary_handler[];	/* defined elsewhere (presumably in asm) — TODO confirm */
33 static unsigned int *opal_irqs;	/* virqs requested in opal_init(), freed in opal_shutdown() */
34 static unsigned int opal_irq_count;	/* number of entries in opal_irqs[] */
35 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);	/* callbacks fed OPAL event bits */
36 static DEFINE_SPINLOCK(opal_notifier_lock);	/* guards last_notified_mask */
37 static uint64_t last_notified_mask = 0x0ul;	/* event bits as of the last notification */
38 static atomic_t opal_notifier_hold = ATOMIC_INIT(0);	/* non-zero: opal_do_notifier() is suppressed */
/*
 * early_init_dt_scan_opal - flat-device-tree scan hook for the "ibm,opal" node.
 *
 * Records the OPAL base/entry addresses from the "opal-base-address" and
 * "opal-entry-address" properties into the global 'opal' struct, then sets
 * FW_FEATURE_OPAL* bits from the advertised compatible string
 * (ibm,opal-v3 / ibm,opal-v2; anything else is reported as V1).
 *
 * NOTE(review): this excerpt is missing lines (early 'return' paths and
 * closing braces implied by the numbering gaps).
 */
40 int __init early_init_dt_scan_opal(unsigned long node,
41 const char *uname, int depth, void *data)
43 const void *basep, *entryp;
44 unsigned long basesz, entrysz;
/* Only match the node named "ibm,opal" at depth 1 (directly under root). */
46 if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
49 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
50 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
/* Both properties must be present to proceed. */
52 if (!basep || !entryp)
/* Property sizes are in bytes; of_read_number() wants a cell count. */
55 opal.base = of_read_number(basep, basesz/4);
56 opal.entry = of_read_number(entryp, entrysz/4);
58 pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%ld)\n",
59 opal.base, basep, basesz);
/* NOTE(review): format string says "basesz" but the argument is entrysz. */
60 pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n",
61 opal.entry, entryp, entrysz);
63 powerpc_firmware_features |= FW_FEATURE_OPAL;
/* v3 firmware implies the v2 feature set as well. */
64 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
65 powerpc_firmware_features |= FW_FEATURE_OPALv2;
66 powerpc_firmware_features |= FW_FEATURE_OPALv3;
/* NOTE(review): printk without an explicit KERN_* log level. */
67 printk("OPAL V3 detected !\n");
68 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
69 powerpc_firmware_features |= FW_FEATURE_OPALv2;
70 printk("OPAL V2 detected !\n");
72 printk("OPAL V1 detected !\n");
/*
 * opal_register_exception_handlers - hand exception glue entry points to OPAL.
 *
 * Runs as an early_initcall; does nothing unless OPAL firmware was detected
 * by early_init_dt_scan_opal().
 *
 * NOTE(review): the lines that set up 'glue' (apparently starting at 0x7000,
 * per the comment below) are missing from this excerpt.
 */
78 static int __init opal_register_exception_handlers(void)
83 if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
86 /* Hookup some exception handlers. We use the fwnmi area at 0x7000
87 * to provide the glue space to OPAL
/* Machine check: pass the physical address of the secondary handler. */
90 opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
91 __pa(opal_mc_secondary_handler[0]),
94 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
/* Softpatch handler takes no secondary entry (0), just the glue page. */
97 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
103 early_initcall(opal_register_exception_handlers);
/*
 * opal_notifier_register - add a callback to the OPAL event notifier chain.
 * @nb: notifier block to register; warned about and rejected if invalid
 *      (the validation condition itself is on a missing line).
 */
105 int opal_notifier_register(struct notifier_block *nb)
108 pr_warning("%s: Invalid argument (%p)\n",
113 atomic_notifier_chain_register(&opal_notifier_head, nb);
/*
 * opal_do_notifier - deliver the current OPAL event word to the chain.
 * @events: latest event bit mask read from firmware.
 *
 * Skipped entirely while opal_notifier_hold is set (see
 * opal_notifier_enable/disable). Computes which bits changed since the
 * last delivery under opal_notifier_lock, then calls the chain outside
 * the lock with the raw events as the value and the changed mask cast
 * into the void* argument.
 */
117 static void opal_do_notifier(uint64_t events)
120 uint64_t changed_mask;
/* Notifications are on hold — drop this update. */
122 if (atomic_read(&opal_notifier_hold))
125 spin_lock_irqsave(&opal_notifier_lock, flags);
126 changed_mask = last_notified_mask ^ events;
127 last_notified_mask = events;
128 spin_unlock_irqrestore(&opal_notifier_lock, flags);
131 * We feed with the event bits and changed bits for
132 * enough information to the callback.
134 atomic_notifier_call_chain(&opal_notifier_head,
135 events, (void *)changed_mask);
/*
 * opal_notifier_update_evt - force bits of the cached event state.
 * @evt_mask: event bits to clear from last_notified_mask.
 * (A second parameter, presumably 'evt_val' given its use below, is on a
 *  missing line — confirm against the full source.)
 *
 * Clears @evt_mask then ORs in evt_val under opal_notifier_lock, so a
 * subsequent opal_do_notifier() will see those bits as changed again.
 */
138 void opal_notifier_update_evt(uint64_t evt_mask,
143 spin_lock_irqsave(&opal_notifier_lock, flags);
144 last_notified_mask &= ~evt_mask;
145 last_notified_mask |= evt_val;
146 spin_unlock_irqrestore(&opal_notifier_lock, flags);
/*
 * opal_notifier_enable - release the notification hold and flush.
 *
 * Clears opal_notifier_hold, then polls firmware once and delivers any
 * events that accumulated while notifications were held off.
 */
149 void opal_notifier_enable(void)
154 atomic_set(&opal_notifier_hold, 0);
156 /* Process pending events */
157 rc = opal_poll_events(&evt);
158 if (rc == OPAL_SUCCESS && evt)
159 opal_do_notifier(evt);
/*
 * opal_notifier_disable - hold off event delivery.
 * While the hold flag is set, opal_do_notifier() drops updates; pending
 * state is re-synced by opal_notifier_enable().
 */
162 void opal_notifier_disable(void)
164 atomic_set(&opal_notifier_hold, 1);
/*
 * opal_get_chars - read pending characters from an OPAL console.
 * @vtermno: virtual terminal number.
 * @buf:     destination buffer.
 * @count:   maximum bytes to read.
 *
 * Returns the number of bytes read on OPAL_SUCCESS (other return paths are
 * on missing lines). Note the in-memory arguments to OPAL calls are
 * big-endian, hence the cpu_to_be64/be64_to_cpu round-trips on 'len'.
 */
167 int opal_get_chars(uint32_t vtermno, char *buf, int count)
/* Ask firmware to refresh the event word before checking for input. */
174 opal_poll_events(&evt);
175 if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
/* 'len' is an in/out parameter: request size in, bytes read out (BE). */
177 len = cpu_to_be64(count);
178 rc = opal_console_read(vtermno, &len, buf);
179 if (rc == OPAL_SUCCESS)
180 return be64_to_cpu(len);
/*
 * opal_put_chars - write a buffer to an OPAL console, atomically.
 * @vtermno:   virtual terminal number.
 * @data:      bytes to write.
 * @total_len: number of bytes in @data.
 *
 * Holds opal_write_lock across the whole operation so concurrent writers
 * cannot interleave (important for hvsi packet framing, per the comment
 * below). On OPALv2+ it first checks buffer space and bails out early if
 * there is not enough room. As with opal_get_chars(), the length argument
 * passed to firmware is big-endian in memory.
 *
 * NOTE(review): several lines (locals, returns, loop brace closures) are
 * missing from this excerpt.
 */
184 int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
195 /* We want put_chars to be atomic to avoid mangling of hvsi
196 * packets. To do that, we first test for room and return
197 * -EAGAIN if there isn't enough.
199 * Unfortunately, opal_console_write_buffer_space() doesn't
200 * appear to work on opal v1, so we just assume there is
201 * enough room and be done with it
203 spin_lock_irqsave(&opal_write_lock, flags);
204 if (firmware_has_feature(FW_FEATURE_OPALv2)) {
205 rc = opal_console_write_buffer_space(vtermno, &olen);
206 len = be64_to_cpu(olen);
/* Not enough room (or query failed): release the lock and give up. */
207 if (rc || len < total_len) {
208 spin_unlock_irqrestore(&opal_write_lock, flags);
209 /* Closed -> drop characters */
212 opal_poll_events(NULL);
217 /* We still try to handle partial completions, though they
218 * should no longer happen.
/* Retry while busy; continue after partial writes on OPAL_SUCCESS. */
221 while(total_len > 0 && (rc == OPAL_BUSY ||
222 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
223 olen = cpu_to_be64(total_len);
224 rc = opal_console_write(vtermno, &olen, data);
225 len = be64_to_cpu(olen);
227 /* Closed or other error drop */
228 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
229 rc != OPAL_BUSY_EVENT) {
233 if (rc == OPAL_SUCCESS) {
238 /* This is a bit nasty but we need that for the console to
239 * flush when there aren't any interrupts. We will clean
240 * things a bit later to limit that to synchronous path
241 * such as the kernel console and xmon/udbg
/* Busy-poll firmware until it stops reporting pending console output. */
244 opal_poll_events(&evt);
245 while(rc == OPAL_SUCCESS &&
246 (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
248 spin_unlock_irqrestore(&opal_write_lock, flags);
/*
 * opal_machine_check - decode and report an OPAL machine check event.
 * @regs: interrupted register state (not referenced in the visible lines).
 *
 * Copies the per-CPU event out of the PACA, releases the firmware slot by
 * clearing in_use, then prints a human-readable decode of the severity,
 * initiator, and error type (UE / SLB / ERAT / TLB / unknown).
 *
 * Returns 1 if the event is recoverable, 0 for OpalMCE_SEV_FATAL (see the
 * final return).
 *
 * NOTE(review): many lines are missing from this excerpt — table entries,
 * 'break' statements, the struct copy, and assignments to level/sevstr.
 */
252 int opal_machine_check(struct pt_regs *regs)
254 struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt;
255 struct opal_machine_check_event evt;
256 const char *level, *sevstr, *subtype;
/* Decode tables: indexed by the per-type *_error_type fields below. */
257 static const char *opal_mc_ue_types[] = {
260 "Page table walk ifetch",
262 "Page table walk Load/Store",
264 static const char *opal_mc_slb_types[] = {
269 static const char *opal_mc_erat_types[] = {
274 static const char *opal_mc_tlb_types[] = {
280 /* Copy the event structure and release the original */
282 opal_evt->in_use = 0;
284 /* Print things out */
/* Only OpalMCE_V1 events are understood; anything else is reported raw. */
285 if (evt.version != OpalMCE_V1) {
286 pr_err("Machine Check Exception, Unknown event version %d !\n",
/* Map severity to a printk level and a human-readable label. */
290 switch(evt.severity) {
291 case OpalMCE_SEV_NO_ERROR:
295 case OpalMCE_SEV_WARNING:
296 level = KERN_WARNING;
299 case OpalMCE_SEV_ERROR_SYNC:
303 case OpalMCE_SEV_FATAL:
/* NOTE(review): unbalanced '[' in the "[Not recovered" string below. */
310 printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
311 evt.disposition == OpalMCE_DISPOSITION_RECOVERED ?
312 "Recovered" : "[Not recovered");
313 printk("%s  Initiator: %s\n", level,
314 evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown");
/* Per-type decode: guard each table lookup against out-of-range indices. */
315 switch(evt.error_type) {
316 case OpalMCE_ERROR_TYPE_UE:
317 subtype = evt.u.ue_error.ue_error_type <
318 ARRAY_SIZE(opal_mc_ue_types) ?
319 opal_mc_ue_types[evt.u.ue_error.ue_error_type]
321 printk("%s  Error type: UE [%s]\n", level, subtype);
322 if (evt.u.ue_error.effective_address_provided)
323 printk("%s    Effective address: %016llx\n",
324 level, evt.u.ue_error.effective_address);
/* NOTE(review): typo "Physial" in the runtime string below. */
325 if (evt.u.ue_error.physical_address_provided)
326 printk("%s      Physial address: %016llx\n",
327 level, evt.u.ue_error.physical_address);
329 case OpalMCE_ERROR_TYPE_SLB:
330 subtype = evt.u.slb_error.slb_error_type <
331 ARRAY_SIZE(opal_mc_slb_types) ?
332 opal_mc_slb_types[evt.u.slb_error.slb_error_type]
334 printk("%s  Error type: SLB [%s]\n", level, subtype);
335 if (evt.u.slb_error.effective_address_provided)
336 printk("%s    Effective address: %016llx\n",
337 level, evt.u.slb_error.effective_address);
339 case OpalMCE_ERROR_TYPE_ERAT:
340 subtype = evt.u.erat_error.erat_error_type <
341 ARRAY_SIZE(opal_mc_erat_types) ?
342 opal_mc_erat_types[evt.u.erat_error.erat_error_type]
344 printk("%s  Error type: ERAT [%s]\n", level, subtype);
345 if (evt.u.erat_error.effective_address_provided)
346 printk("%s    Effective address: %016llx\n",
347 level, evt.u.erat_error.effective_address);
349 case OpalMCE_ERROR_TYPE_TLB:
350 subtype = evt.u.tlb_error.tlb_error_type <
351 ARRAY_SIZE(opal_mc_tlb_types) ?
352 opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type]
354 printk("%s  Error type: TLB [%s]\n", level, subtype);
355 if (evt.u.tlb_error.effective_address_provided)
356 printk("%s    Effective address: %016llx\n",
357 level, evt.u.tlb_error.effective_address);
360 case OpalMCE_ERROR_TYPE_UNKNOWN:
361 printk("%s  Error type: Unknown\n", level);
/* Non-fatal events are reported as handled (1); fatal returns 0. */
364 return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1;
/*
 * opal_interrupt - IRQ handler for interrupts routed to OPAL.
 *
 * Translates the Linux virq back to the hardware number for firmware,
 * lets OPAL handle it, and forwards any returned event bits to the
 * notifier chain. (The IRQ_* return value is on a missing line.)
 */
367 static irqreturn_t opal_interrupt(int irq, void *data)
371 opal_handle_interrupt(virq_to_hw(irq), &events);
373 opal_do_notifier(events);
/*
 * opal_init - late platform initialisation for OPAL.
 *
 * Locates the /ibm,opal device-tree node, creates platform devices for
 * any console ("serial") children, then maps and requests every hardware
 * interrupt listed in the "opal-interrupts" property. Registered as a
 * subsys_initcall.
 *
 * NOTE(review): in the visible lines the kzalloc() result is never
 * checked, and 'irqlen' is consumed (lines 406-409) before the
 * 'irqs != NULL' guard in the for-loop — if the property is absent,
 * irqlen may be used uninitialised. Confirm against the full source.
 */
378 static int __init opal_init(void)
380 struct device_node *np, *consoles;
384 opal_node = of_find_node_by_path("/ibm,opal");
386 pr_warn("opal: Node not found\n");
390 /* Register OPAL consoles if any ports */
/* v2+ keeps consoles under a dedicated child node; v1 puts them at the top. */
391 if (firmware_has_feature(FW_FEATURE_OPALv2))
392 consoles = of_find_node_by_path("/ibm,opal/consoles");
394 consoles = of_node_get(opal_node);
396 for_each_child_of_node(consoles, np) {
397 if (strcmp(np->name, "serial"))
399 of_platform_device_create(np, NULL, NULL);
401 of_node_put(consoles);
404 /* Find all OPAL interrupts and request them */
/* "opal-interrupts" is an array of be32 cells, hence irqlen / 4. */
405 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
406 pr_debug("opal: Found %d interrupts reserved for OPAL\n",
407 irqs ? (irqlen / 4) : 0);
408 opal_irq_count = irqlen / 4;
409 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
410 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
411 unsigned int hwirq = be32_to_cpup(irqs);
412 unsigned int irq = irq_create_mapping(NULL, hwirq);
414 pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
417 rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
419 pr_warning("opal: Error %d requesting irq %d"
420 " (0x%x)\n", rc, irq, hwirq);
425 subsys_initcall(opal_init);
427 void opal_shutdown(void)
431 for (i = 0; i < opal_irq_count; i++) {
433 free_irq(opal_irqs[i], NULL);