drivers/staging/unisys/visorbus/visorchipset.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;    /* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

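/* Only minor 0 of the visorchipset character device may be opened. */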
static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned int minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        file->private_data = NULL;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
        SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
        (uuid_le_cmp(channel_type_guid,\
                     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 __iomem *ptr;        /* pointer to base address of payload pool */
        u64 offset;             /* offset from beginning of controlvm
                                 * channel to beginning of payload pool */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
        struct list_head next;  /* putfile_buffer_entry list */
        struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* points within data area of parser_ctx to next byte of data */
        u8 *pnext;
        /* # bytes left from <pnext> to the end of this data buffer */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;
        u64 file_request_number;        /* from original TransmitFile request */

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /* most-recent sequence number supplied via a controlvm message */
        u64 data_sequence_number;

        /* head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         * file data
         * - this list is removed from via the hotplug program that is actually
         * consuming these buffers to write as file data
         */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /* <0 = failed, 0 = in-progress, >0 = successful;
         * note that this must be set with req_list_lock, and if you set <0,
         * it is your responsibility to also free up all of the other objects
         * in this struct (like input_buffer_list, active_buf.parser_ctx)
         * before releasing the lock
         */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visorchipset_bus_info *p, int response);
static void bus_destroy_response(struct visorchipset_bus_info *p, int response);
static void device_create_response(struct visorchipset_device_info *p,
                                   int response);
static void device_destroy_response(struct visorchipset_device_info *p,
                                    int response);
static void device_resume_response(struct visorchipset_device_info *p,
                                   int response);

static void
visorchipset_device_pause_response(struct visorchipset_device_info *p,
                                   int response);

static struct visorchipset_busdev_responders busdev_responders = {
        .bus_create = bus_create_response,
        .bus_destroy = bus_destroy_response,
        .device_create = device_create_response,
        .device_destroy = device_destroy_response,
        .device_pause = visorchipset_device_pause_response,
        .device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
        &dev_attr_chipsetready.attr,
        NULL
};

static struct attribute_group visorchipset_guest_group = {
        .name = "guest",
        .attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_guest_group,
        &visorchipset_parahotplug_group,
        NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
                              int response);
static void controlvm_respond_chipset_init(
                struct controlvm_message_header *msg_hdr, int response,
                enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

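/* Copy the payload of a controlvm message into a newly allocated
 * parser_context.  When @local is set, @addr is translated with __va();
 * otherwise the region is reserved and ioremap()ed first.  On memory
 * pressure, *@retry is set to true so the caller can stash the message and
 * try again later.  Returns NULL on failure.
 */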
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
        int allocbytes = sizeof(struct parser_context) + bytes;
        struct parser_context *rc = NULL;
        struct parser_context *ctx = NULL;

        if (retry)
                *retry = false;

        /*
         * allocate an extra byte to ensure the payload is
         * '\0'-terminated
         */
        allocbytes++;
        if ((controlvm_payload_bytes_buffered + bytes)
            > MAX_CONTROLVM_PAYLOAD_BYTES) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }
        ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
        if (!ctx) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }

        ctx->allocbytes = allocbytes;
        ctx->param_bytes = bytes;
        ctx->curr = NULL;
        ctx->bytes_remaining = 0;
        ctx->byte_stream = false;
        if (local) {
                void *p;

                if (addr > virt_to_phys(high_memory - 1)) {
                        rc = NULL;
                        goto cleanup;
                }
                p = __va((unsigned long)(addr));
                memcpy(ctx->data, p, bytes);
        } else {
                void __iomem *mapping;

                if (!request_mem_region(addr, bytes, "visorchipset")) {
                        rc = NULL;
                        goto cleanup;
                }

                mapping = ioremap_cache(addr, bytes);
                if (!mapping) {
                        release_mem_region(addr, bytes);
                        rc = NULL;
                        goto cleanup;
                }
                memcpy_fromio(ctx->data, mapping, bytes);
                /* unmap now that the payload has been copied */
                iounmap(mapping);
                release_mem_region(addr, bytes);
        }

        ctx->byte_stream = true;
        rc = ctx;
cleanup:
        if (rc) {
                controlvm_payload_bytes_buffered += ctx->param_bytes;
        } else {
                if (ctx) {
                        parser_done(ctx);
                        ctx = NULL;
                }
        }
        return rc;
}

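/* Return the id field of the parameter header at the start of the payload,
 * or NULL_UUID_LE when there is no context.
 */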
static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return NULL_UUID_LE;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

/** Describes the state from the perspective of which controlvm messages have
 *  been received for a bus or device.
 */

enum PARSER_WHICH_STRING {
        PARSERSTRING_INITIATOR,
        PARSERSTRING_TARGET,
        PARSERSTRING_CONNECTION,
        PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
                   enum PARSER_WHICH_STRING which_string)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        switch (which_string) {
        case PARSERSTRING_INITIATOR:
                ctx->curr = ctx->data + phdr->initiator_offset;
                ctx->bytes_remaining = phdr->initiator_length;
                break;
        case PARSERSTRING_TARGET:
                ctx->curr = ctx->data + phdr->target_offset;
                ctx->bytes_remaining = phdr->target_length;
                break;
        case PARSERSTRING_CONNECTION:
                ctx->curr = ctx->data + phdr->connection_offset;
                ctx->bytes_remaining = phdr->connection_length;
                break;
        case PARSERSTRING_NAME:
                ctx->curr = ctx->data + phdr->name_offset;
                ctx->bytes_remaining = phdr->name_length;
                break;
        default:
                break;
        }
}

static void parser_done(struct parser_context *ctx)
{
        if (!ctx)
                return;
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

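/* Return a kmalloc()ed, '\0'-terminated copy of the parameter string most
 * recently selected with parser_param_start(), or NULL on failure.  The
 * caller must kfree() the result.
 */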
static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        int value_length = -1;
        void *value = NULL;
        int i;

        if (!ctx)
                return NULL;
        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0, value_length = -1; i < nscan; i++)
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        if (value_length < 0)   /* '\0' was not included in the length */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)(value))[value_length] = '\0';
        return value;
}

static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action;

        visorchannel_read(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action),
                &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write(controlvm_channel,
                        offsetof(struct spar_controlvm_channel_protocol,
                                 efi_spar_ind), &efi_spar_indication,
                        sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_error),
                &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_text_id),
                          &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_text_id),
                &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_remaining_steps),
                &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}

static void
bus_info_clear(void *v)
{
        struct visorchipset_bus_info *p = (struct visorchipset_bus_info *)v;

        kfree(p->name);
        kfree(p->description);
        memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
        struct visorchipset_device_info *p =
                (struct visorchipset_device_info *)v;

        memset(p, 0, sizeof(struct visorchipset_device_info));
}

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = (struct visor_busdev *)data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if (((bus_no == -1) || (vdev->chipset_bus_no == bus_no)) &&
            ((dev_no == -1) || (vdev->chipset_dev_no == dev_no)))
                return 1;

        return 0;
}
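
/* Find the visor_device identified by @bus_no/@dev_no, optionally resuming
 * a previous search from @from; either number may be -1 to act as a
 * wildcard.  Returns NULL when no (further) match exists.
 */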
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                        .bus_no = bus_no,
                        .dev_no = dev_no
                };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
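
/* A minimal usage sketch (illustrative only, not code from this driver):
 * a caller could iterate every device on bus 0 like so, where
 * do_something_with() is a hypothetical helper:
 *
 *      struct visor_device *vdev = NULL;
 *
 *      while ((vdev = visorbus_get_device_by_id(0, -1, vdev)) != NULL)
 *              do_something_with(vdev);
 */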

static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
        struct visorchipset_bus_info *p;

        list_for_each_entry(p, list, entry) {
                if (p->bus_no == bus_no)
                        return p;
        }

        return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
        struct visorchipset_device_info *p;

        list_for_each_entry(p, list, entry) {
                if (p->bus_no == bus_no && p->dev_no == dev_no)
                        return p;
        }

        return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
        struct visorchipset_device_info *p, *tmp;

        list_for_each_entry_safe(p, tmp, list, entry) {
                if (p->bus_no == bus_no) {
                        list_del(&p->entry);
                        kfree(p);
                }
        }
}

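/* Return 1 only if every entry of chipset_events[] has been set, i.e. all
 * events needed before a CHIPSET_READY response may be sent have occurred.
 */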
static u8
check_chipset_events(void)
{
        int i;
        u8 send_msg = 1;

        /* Check events to determine if response should be sent */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                send_msg &= chipset_events[i];
        return send_msg;
}

static void
clear_chipset_events(void)
{
        int i;

        /* Clear chipset_events */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                chipset_events[i] = 0;
}

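/* Called by the visorbus driver to register its notifier callbacks and to
 * receive the responder functions it must invoke once each notification
 * has been handled.  Passing @notifiers == NULL unregisters.  When
 * @driver_info is non-NULL it is filled in with chipset identification
 * strings.
 */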
void
visorchipset_register_busdev(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_notifiers, 0,
                       sizeof(busdev_notifiers));
                visorbusregistered = 0; /* clear flag */
        } else {
                busdev_notifiers = *notifiers;
                visorbusregistered = 1; /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset", "visorchipset",
                                     VERSION, NULL);

        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
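
/* An illustrative registration sketch (hypothetical caller, not part of
 * this file); my_bus_create/my_bus_destroy stand in for real callbacks:
 *
 *      static struct visorchipset_busdev_notifiers my_notifiers = {
 *              .bus_create = my_bus_create,
 *              .bus_destroy = my_bus_destroy,
 *      };
 *      static struct visorchipset_busdev_responders responders;
 *
 *      visorchipset_register_busdev(&my_notifiers, &responders, NULL);
 *
 * The registered callbacks are then expected to call the matching
 * responders.* function when each notification completes.
 */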

static void
cleanup_controlvm_structures(void)
{
        struct visorchipset_bus_info *bi, *tmp_bi;
        struct visorchipset_device_info *di, *tmp_di;

        list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
                bus_info_clear(bi);
                list_del(&bi->entry);
                kfree(bi);
        }

        list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
                dev_info_clear(di);
                list_del(&di->entry);
                kfree(di);
        }
}

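/* Handle the CONTROLVM_CHIPSET_INIT message: record the feature bits we
 * support and respond, failing with ALREADY_DONE on a repeated init.
 */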
static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /* Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features =
            inmsg->cmd.init_chipset.
            features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /* Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
        if (rc < 0)
                cleanup_controlvm_structures();
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

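/* Build a response in *msg that echoes the original header in *msg_hdr,
 * with the payload fields cleared and the failed/completion_status bits
 * set when response < 0.
 */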
static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
         * back the deviceChangeState structure in the packet.
         */
        if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
            g_devicechangestate_packet.device_change_state.bus_no ==
            g_diagpool_bus_no &&
            g_devicechangestate_packet.device_change_state.dev_no ==
            g_diagpool_dev_no)
                outmsg.cmd = g_devicechangestate_packet;
        if (outmsg.hdr.flags.test_message == 1)
                return;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};

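/* Send the pending controlvm response (if any) for a bus create/destroy
 * command, and tear down the bus bookkeeping on a destroy or a failed
 * create.
 */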
static void
bus_responder(enum controlvm_id cmd_id, struct visorchipset_bus_info *p,
              int response)
{
        bool need_clear = false;
        u32 bus_no;

        if (!p)
                return;
        /* only dereference p after the NULL check above */
        bus_no = p->bus_no;

        if (response < 0) {
                if ((cmd_id == CONTROLVM_BUS_CREATE) &&
                    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
                        /* undo the row we just created... */
                        busdevices_del(&dev_info_list, bus_no);
        } else {
                if (cmd_id == CONTROLVM_BUS_CREATE)
                        p->state.created = 1;
                if (cmd_id == CONTROLVM_BUS_DESTROY)
                        need_clear = true;
        }

        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;
        controlvm_respond(&p->pending_msg_hdr, response);
        p->pending_msg_hdr.id = CONTROLVM_INVALID;
        if (need_clear) {
                bus_info_clear(p);
                busdevices_del(&dev_info_list, bus_no);
        }
}

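/* Like device_responder(), but for DEVICE_CHANGESTATE responses, which
 * must also carry the bus/device numbers and the resulting segment state.
 */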
static void
device_changestate_responder(enum controlvm_id cmd_id,
                             struct visorchipset_device_info *p, int response,
                             struct spar_segment_state response_state)
{
        struct controlvm_message outmsg;
        u32 bus_no;
        u32 dev_no;

        if (!p)
                return;
        /* only dereference p after the NULL check above */
        bus_no = p->bus_no;
        dev_no = p->dev_no;
        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr.id != cmd_id)
                return;

        controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;

        p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, struct visorchipset_device_info *p,
                 int response)
{
        bool need_clear = false;

        if (!p)
                return;
        if (response >= 0) {
                if (cmd_id == CONTROLVM_DEVICE_CREATE)
                        p->state.created = 1;
                if (cmd_id == CONTROLVM_DEVICE_DESTROY)
                        need_clear = true;
        }

        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */

        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;

        controlvm_respond(&p->pending_msg_hdr, response);
        p->pending_msg_hdr.id = CONTROLVM_INVALID;
        if (need_clear)
                dev_info_clear(p);
}

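/* Common tail for bus create/destroy handling: stash the pending message
 * header when a response is expected, invoke the registered visorbus
 * notifier for the command, and respond directly when no notifier was
 * called.
 */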
static void
bus_epilog(struct visorchipset_bus_info *bus_info,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        bool notified = false;

        if (!bus_info)
                return;

        if (need_response) {
                memcpy(&bus_info->pending_msg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
        } else {
                bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
        }

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        if (busdev_notifiers.bus_create) {
                                (*busdev_notifiers.bus_create) (bus_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (busdev_notifiers.bus_destroy) {
                                (*busdev_notifiers.bus_destroy) (bus_info);
                                notified = true;
                        }
                        break;
                }
        }
        if (!notified)
                /* When a notifier was invoked, its callback is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call bus_responder(); since no one
                 * was notified here, respond directly.
                 */
                bus_responder(cmd, bus_info, response);
        up(&notifier_lock);
}

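/* Common tail for device create/changestate/destroy handling; mirrors
 * bus_epilog(), and additionally emits a KOBJ_ONLINE uevent when the
 * DiagPool device enters the paused state.
 */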
static void
device_epilog(struct visorchipset_device_info *dev_info,
              struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers;
        bool notified = false;
        u32 bus_no;
        u32 dev_no;

        char *envp[] = {
                "SPARSP_DIAGPOOL_PAUSED_STATE=1",
                NULL
        };

        if (!dev_info)
                return;
        /* only dereference dev_info after the NULL check above */
        bus_no = dev_info->bus_no;
        dev_no = dev_info->dev_no;

        notifiers = &busdev_notifiers;

        if (need_response) {
                memcpy(&dev_info->pending_msg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
        } else {
                dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
        }

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create) (dev_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume) (dev_info);
                                        notified = true;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause) (dev_info);
                                        notified = true;
                                }
                        } else if (state.alive == segment_state_paused.alive &&
                                   state.operating ==
                                   segment_state_paused.operating) {
                                /* this is lite pause where channel is
                                 * still valid just 'pause' of it
                                 */
                                if (bus_no == g_diagpool_bus_no &&
                                    dev_no == g_diagpool_dev_no) {
                                        /* this will trigger the
                                         * diag_shutdown.sh script in
                                         * the visorchipset hotplug
                                         */
                                        kobject_uevent_env
                                            (&visorchipset_platform_device.dev.
                                             kobj, KOBJ_ONLINE, envp);
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy) (dev_info);
                                notified = true;
                        }
                        break;
                }
        }
        if (!notified)
                /* When a notifier was invoked, its callback is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call device_responder(); since no one
                 * was notified here, respond directly.
                 */
                device_responder(cmd, dev_info, response);
        up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visorchipset_bus_info *bus_info;
        struct visorchannel *visorchannel;

        bus_info = bus_find(&bus_info_list, bus_no);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&bus_info->entry);
        bus_info->bus_no = bus_no;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        bus_info->flags.server = inmsg->hdr.flags.server;

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_bus.bus_data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(bus_info);
                bus_info = NULL;
                goto cleanup;
        }
        bus_info->visorchannel = visorchannel;
        list_add(&bus_info->entry, &bus_info_list);

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
        bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;
        char s[99];

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                visorchannel_set_clientpartition(bus_info->visorchannel,
                                cmd->configure_bus.guest_handle);
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                visorchannel_uuid_id(&bus_info->partition_uuid, s);
                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visorchipset_device_info *dev_info;
        struct visorchipset_bus_info *bus_info;
        struct visorchannel *visorchannel;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = device_find(&dev_info_list, bus_no, dev_no);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }
        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }
        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&dev_info->entry);
        dev_info->bus_no = bus_no;
        dev_info->dev_no = dev_no;
        dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        visorchannel = visorchannel_create(cmd->create_device.channel_addr,
                                           cmd->create_device.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_device.data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(dev_info);
                dev_info = NULL;
                goto cleanup;
        }
        dev_info->visorchannel = visorchannel;
        dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
        list_add(&dev_info->entry, &dev_info_list);
        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
cleanup:
        /* get the bus and devNo for DiagPool channel */
        if (dev_info &&
            is_diagpool_channel(cmd->create_device.data_type_uuid)) {
                g_diagpool_bus_no = bus_no;
                g_diagpool_dev_no = dev_no;
        }
        device_epilog(dev_info, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visorchipset_device_info *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = device_find(&dev_info_list, bus_no, dev_no);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visorchipset_device_info *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = device_find(&dev_info_list, bus_no, dev_no);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

1430 /* When provided with the physical address of the controlvm channel
1431  * (phys_addr), the offset to the payload area we need to manage
1432  * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative CONTROLVM_RESP_ERROR_* code on failure.
1435  */
1436 static int
1437 initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
1438                                   struct visor_controlvm_payload_info *info)
1439 {
1440         u8 __iomem *payload = NULL;
1441         int rc = CONTROLVM_RESP_SUCCESS;
1442
1443         if (!info) {
1444                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1445                 goto cleanup;
1446         }
1447         memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1448         if ((offset == 0) || (bytes == 0)) {
1449                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1450                 goto cleanup;
1451         }
1452         payload = ioremap_cache(phys_addr + offset, bytes);
1453         if (!payload) {
1454                 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1455                 goto cleanup;
1456         }
1457
1458         info->offset = offset;
1459         info->bytes = bytes;
1460         info->ptr = payload;
1461
1462 cleanup:
1463         if (rc < 0) {
1464                 if (payload) {
1465                         iounmap(payload);
1466                         payload = NULL;
1467                 }
1468         }
1469         return rc;
1470 }
1471
1472 static void
1473 destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
1474 {
1475         if (info->ptr) {
1476                 iounmap(info->ptr);
1477                 info->ptr = NULL;
1478         }
1479         memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1480 }
1481
1482 static void
1483 initialize_controlvm_payload(void)
1484 {
1485         u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
1486         u64 payload_offset = 0;
1487         u32 payload_bytes = 0;
1488
1489         if (visorchannel_read(controlvm_channel,
1490                               offsetof(struct spar_controlvm_channel_protocol,
1491                                        request_payload_offset),
1492                               &payload_offset, sizeof(payload_offset)) < 0) {
1493                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1494                                  POSTCODE_SEVERITY_ERR);
1495                 return;
1496         }
1497         if (visorchannel_read(controlvm_channel,
1498                               offsetof(struct spar_controlvm_channel_protocol,
1499                                        request_payload_bytes),
1500                               &payload_bytes, sizeof(payload_bytes)) < 0) {
1501                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1502                                  POSTCODE_SEVERITY_ERR);
1503                 return;
1504         }
1505         initialize_controlvm_payload_info(phys_addr,
1506                                           payload_offset, payload_bytes,
1507                                           &controlvm_payload_info);
1508 }
1509
1510 /*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1511  *  Returns CONTROLVM_RESP_xxx code.
1512  */
1513 static int
1514 visorchipset_chipset_ready(void)
1515 {
1516         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1517         return CONTROLVM_RESP_SUCCESS;
1518 }
1519
1520 static int
1521 visorchipset_chipset_selftest(void)
1522 {
1523         char env_selftest[20];
1524         char *envp[] = { env_selftest, NULL };
1525
1526         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1527         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1528                            envp);
1529         return CONTROLVM_RESP_SUCCESS;
1530 }
1531
1532 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1533  *  Returns CONTROLVM_RESP_xxx code.
1534  */
1535 static int
1536 visorchipset_chipset_notready(void)
1537 {
1538         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1539         return CONTROLVM_RESP_SUCCESS;
1540 }
1541
1542 static void
1543 chipset_ready(struct controlvm_message_header *msg_hdr)
1544 {
1545         int rc = visorchipset_chipset_ready();
1546
1547         if (rc != CONTROLVM_RESP_SUCCESS)
1548                 rc = -rc;
1549         if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1550                 controlvm_respond(msg_hdr, rc);
1551         if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1552                 /* Send CHIPSET_READY response when all modules have been loaded
1553                  * and disks mounted for the partition
1554                  */
1555                 g_chipset_msg_hdr = *msg_hdr;
1556         }
1557 }
1558
1559 static void
1560 chipset_selftest(struct controlvm_message_header *msg_hdr)
1561 {
1562         int rc = visorchipset_chipset_selftest();
1563
1564         if (rc != CONTROLVM_RESP_SUCCESS)
1565                 rc = -rc;
1566         if (msg_hdr->flags.response_expected)
1567                 controlvm_respond(msg_hdr, rc);
1568 }
1569
1570 static void
1571 chipset_notready(struct controlvm_message_header *msg_hdr)
1572 {
1573         int rc = visorchipset_chipset_notready();
1574
1575         if (rc != CONTROLVM_RESP_SUCCESS)
1576                 rc = -rc;
1577         if (msg_hdr->flags.response_expected)
1578                 controlvm_respond(msg_hdr, rc);
1579 }
1580
1581 /* This is your "one-stop" shop for grabbing the next message from the
1582  * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1583  */
1584 static bool
1585 read_controlvm_event(struct controlvm_message *msg)
1586 {
1587         if (visorchannel_signalremove(controlvm_channel,
1588                                       CONTROLVM_QUEUE_EVENT, msg)) {
1589                 /* got a message */
1590                 if (msg->hdr.flags.test_message == 1)
1591                         return false;
1592                 return true;
1593         }
1594         return false;
1595 }
1596
1597 /*
1598  * The general parahotplug flow works as follows.  The visorchipset
1599  * driver receives a DEVICE_CHANGESTATE message from Command
1600  * specifying a physical device to enable or disable.  The CONTROLVM
1601  * message handler calls parahotplug_process_message, which then adds
1602  * the message to a global list and kicks off a udev event which
1603  * causes a user level script to enable or disable the specified
 * device.  The udev script then writes the request id back to the
 * parahotplug/devicedisabled (or deviceenabled) sysfs attribute, which
 * causes devicedisabled_store (or deviceenabled_store) to be called,
 * at which point the matching CONTROLVM message is retrieved from the
 * list and responded to.
1608  */
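/*
 * Illustration only -- a minimal sketch of what the user-level helper
 * script (not part of this driver) might do to complete a disable
 * request.  The platform device path comes from the DEVPATH noted
 * below; the "parahotplug" subdirectory is an assumption about how the
 * attribute group is registered:
 *
 *   # invoked by udev with SPAR_PARAHOTPLUG_ID in the environment
 *   echo "$SPAR_PARAHOTPLUG_ID" > \
 *       /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */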
1609
1610 #define PARAHOTPLUG_TIMEOUT_MS 2000
1611
1612 /*
1613  * Generate unique int to match an outstanding CONTROLVM message with a
1614  * udev script /proc response
1615  */
1616 static int
1617 parahotplug_next_id(void)
1618 {
1619         static atomic_t id = ATOMIC_INIT(0);
1620
1621         return atomic_inc_return(&id);
1622 }
1623
1624 /*
1625  * Returns the time (in jiffies) when a CONTROLVM message on the list
1626  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1627  */
1628 static unsigned long
1629 parahotplug_next_expiration(void)
1630 {
1631         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1632 }
1633
1634 /*
1635  * Create a parahotplug_request, which is basically a wrapper for a
1636  * CONTROLVM_MESSAGE that we can stick on a list
1637  */
1638 static struct parahotplug_request *
1639 parahotplug_request_create(struct controlvm_message *msg)
1640 {
1641         struct parahotplug_request *req;
1642
1643         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1644         if (!req)
1645                 return NULL;
1646
1647         req->id = parahotplug_next_id();
1648         req->expiration = parahotplug_next_expiration();
1649         req->msg = *msg;
1650
1651         return req;
1652 }
1653
1654 /*
1655  * Free a parahotplug_request.
1656  */
1657 static void
1658 parahotplug_request_destroy(struct parahotplug_request *req)
1659 {
1660         kfree(req);
1661 }
1662
1663 /*
1664  * Cause uevent to run the user level script to do the disable/enable
1665  * specified in (the CONTROLVM message in) the specified
1666  * parahotplug_request
1667  */
1668 static void
1669 parahotplug_request_kickoff(struct parahotplug_request *req)
1670 {
1671         struct controlvm_message_packet *cmd = &req->msg.cmd;
1672         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1673             env_func[40];
1674         char *envp[] = {
1675                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1676         };
1677
1678         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1679         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1680         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1681                 cmd->device_change_state.state.active);
1682         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1683                 cmd->device_change_state.bus_no);
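        /* dev_no appears to pack a PCI-style devfn: the low three bits
         * are the function, the remaining bits the device number
         * (inferred from the shift and mask below)
         */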
1684         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1685                 cmd->device_change_state.dev_no >> 3);
1686         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1687                 cmd->device_change_state.dev_no & 0x7);
1688
1689         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1690                            envp);
1691 }
1692
1693 /*
1694  * Remove any request from the list that's been on there too long and
1695  * respond with an error.
1696  */
1697 static void
1698 parahotplug_process_list(void)
1699 {
1700         struct list_head *pos;
1701         struct list_head *tmp;
1702
1703         spin_lock(&parahotplug_request_list_lock);
1704
1705         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1706                 struct parahotplug_request *req =
1707                     list_entry(pos, struct parahotplug_request, list);
1708
1709                 if (!time_after_eq(jiffies, req->expiration))
1710                         continue;
1711
1712                 list_del(pos);
1713                 if (req->msg.hdr.flags.response_expected)
1714                         controlvm_respond_physdev_changestate(
1715                                 &req->msg.hdr,
1716                                 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1717                                 req->msg.cmd.device_change_state.state);
1718                 parahotplug_request_destroy(req);
1719         }
1720
1721         spin_unlock(&parahotplug_request_list_lock);
1722 }
1723
/*
 * Called from the sysfs store handlers (devicedisabled_store and
 * deviceenabled_store), which means the user script has finished the
 * enable/disable.  Find the request with the matching id, and respond
 * to its CONTROLVM message with success.
 */
1729 static int
1730 parahotplug_request_complete(int id, u16 active)
1731 {
1732         struct list_head *pos;
1733         struct list_head *tmp;
1734
1735         spin_lock(&parahotplug_request_list_lock);
1736
1737         /* Look for a request matching "id". */
1738         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1739                 struct parahotplug_request *req =
1740                     list_entry(pos, struct parahotplug_request, list);
1741                 if (req->id == id) {
1742                         /* Found a match.  Remove it from the list and
1743                          * respond.
1744                          */
1745                         list_del(pos);
1746                         spin_unlock(&parahotplug_request_list_lock);
1747                         req->msg.cmd.device_change_state.state.active = active;
1748                         if (req->msg.hdr.flags.response_expected)
1749                                 controlvm_respond_physdev_changestate(
1750                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1751                                         req->msg.cmd.device_change_state.state);
1752                         parahotplug_request_destroy(req);
1753                         return 0;
1754                 }
1755         }
1756
1757         spin_unlock(&parahotplug_request_list_lock);
1758         return -1;
1759 }
1760
1761 /*
1762  * Enables or disables a PCI device by kicking off a udev script
1763  */
1764 static void
1765 parahotplug_process_message(struct controlvm_message *inmsg)
1766 {
1767         struct parahotplug_request *req;
1768
1769         req = parahotplug_request_create(inmsg);
1770
1771         if (!req)
1772                 return;
1773
1774         if (inmsg->cmd.device_change_state.state.active) {
                /* For enable messages, just respond with success
                 * right away.  This is a bit of a hack, but there are
                 * issues with the early enable messages we get (with
                 * either the udev script not detecting that the device
                 * is up, or not getting called at all).  Fortunately
                 * the messages that get lost don't matter anyway, as
                 * devices are automatically enabled at
                 * initialization.
                 */
1784                 parahotplug_request_kickoff(req);
1785                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1786                         CONTROLVM_RESP_SUCCESS,
1787                         inmsg->cmd.device_change_state.state);
1788                 parahotplug_request_destroy(req);
1789         } else {
                /* For disable messages, add the request to the
                 * request list before kicking off the udev script.  It
                 * won't get responded to until the script has
                 * indicated it's done.
                 */
1795                 spin_lock(&parahotplug_request_list_lock);
1796                 list_add_tail(&req->list, &parahotplug_request_list);
1797                 spin_unlock(&parahotplug_request_list_lock);
1798
1799                 parahotplug_request_kickoff(req);
1800         }
1801 }
1802
1803 /* Process a controlvm message.
1804  * Return result:
1805  *    false - this function will return false only in the case where the
1806  *            controlvm message was NOT processed, but processing must be
1807  *            retried before reading the next controlvm message; a
1808  *            scenario where this can occur is when we need to throttle
1809  *            the allocation of memory in which to copy out controlvm
1810  *            payload data
1811  *    true  - processing of the controlvm message completed,
1812  *            either successfully or with an error.
1813  */
1814 static bool
1815 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1816 {
1817         struct controlvm_message_packet *cmd = &inmsg.cmd;
1818         u64 parm_addr;
1819         u32 parm_bytes;
1820         struct parser_context *parser_ctx = NULL;
1821         bool local_addr;
1822         struct controlvm_message ackmsg;
1823
1824         /* create parsing context if necessary */
1825         local_addr = (inmsg.hdr.flags.test_message == 1);
1826         if (channel_addr == 0)
1827                 return true;
1828         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1829         parm_bytes = inmsg.hdr.payload_bytes;
1830
1831         /* Parameter and channel addresses within test messages actually lie
1832          * within our OS-controlled memory.  We need to know that, because it
1833          * makes a difference in how we compute the virtual address.
1834          */
1835         if (parm_addr && parm_bytes) {
1836                 bool retry = false;
1837
1838                 parser_ctx =
1839                     parser_init_byte_stream(parm_addr, parm_bytes,
1840                                             local_addr, &retry);
1841                 if (!parser_ctx && retry)
1842                         return false;
1843         }
1844
1845         if (!local_addr) {
1846                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1847                                         CONTROLVM_RESP_SUCCESS);
1848                 if (controlvm_channel)
1849                         visorchannel_signalinsert(controlvm_channel,
1850                                                   CONTROLVM_QUEUE_ACK,
1851                                                   &ackmsg);
1852         }
1853         switch (inmsg.hdr.id) {
1854         case CONTROLVM_CHIPSET_INIT:
1855                 chipset_init(&inmsg);
1856                 break;
1857         case CONTROLVM_BUS_CREATE:
1858                 bus_create(&inmsg);
1859                 break;
1860         case CONTROLVM_BUS_DESTROY:
1861                 bus_destroy(&inmsg);
1862                 break;
1863         case CONTROLVM_BUS_CONFIGURE:
1864                 bus_configure(&inmsg, parser_ctx);
1865                 break;
1866         case CONTROLVM_DEVICE_CREATE:
1867                 my_device_create(&inmsg);
1868                 break;
1869         case CONTROLVM_DEVICE_CHANGESTATE:
1870                 if (cmd->device_change_state.flags.phys_device) {
1871                         parahotplug_process_message(&inmsg);
1872                 } else {
                        /* save the hdr and cmd structures for later
                         * use when sending back the response to Command
                         */
1875                         my_device_changestate(&inmsg);
1876                         g_devicechangestate_packet = inmsg.cmd;
1877                         break;
1878                 }
1879                 break;
1880         case CONTROLVM_DEVICE_DESTROY:
1881                 my_device_destroy(&inmsg);
1882                 break;
1883         case CONTROLVM_DEVICE_CONFIGURE:
1884                 /* no op for now, just send a respond that we passed */
1885                 if (inmsg.hdr.flags.response_expected)
1886                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1887                 break;
1888         case CONTROLVM_CHIPSET_READY:
1889                 chipset_ready(&inmsg.hdr);
1890                 break;
1891         case CONTROLVM_CHIPSET_SELFTEST:
1892                 chipset_selftest(&inmsg.hdr);
1893                 break;
1894         case CONTROLVM_CHIPSET_STOP:
1895                 chipset_notready(&inmsg.hdr);
1896                 break;
1897         default:
1898                 if (inmsg.hdr.flags.response_expected)
1899                         controlvm_respond(&inmsg.hdr,
1900                                 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1901                 break;
1902         }
1903
1904         if (parser_ctx) {
1905                 parser_done(parser_ctx);
1906                 parser_ctx = NULL;
1907         }
1908         return true;
1909 }
1910
1911 static inline unsigned int
1912 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1913 {
1914         struct vmcall_io_controlvm_addr_params params;
1915         int result = VMCALL_SUCCESS;
1916         u64 physaddr;
1917
1918         physaddr = virt_to_phys(&params);
1919         ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1920         if (VMCALL_SUCCESSFUL(result)) {
1921                 *control_addr = params.address;
1922                 *control_bytes = params.channel_bytes;
1923         }
1924         return result;
1925 }
1926
1927 static u64 controlvm_get_channel_address(void)
1928 {
1929         u64 addr = 0;
1930         u32 size = 0;
1931
1932         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1933                 return 0;
1934
1935         return addr;
1936 }
1937
1938 static void
1939 controlvm_periodic_work(struct work_struct *work)
1940 {
1941         struct controlvm_message inmsg;
1942         bool got_command = false;
1943         bool handle_command_failed = false;
1944         static u64 poll_count;
1945
1946         /* make sure visorbus server is registered for controlvm callbacks */
1947         if (visorchipset_visorbusregwait && !visorbusregistered)
1948                 goto cleanup;
1949
        poll_count++;
        if (poll_count < 250)
                goto cleanup;   /* skip the first 250 polls */
1955
1956         /* Check events to determine if response to CHIPSET_READY
1957          * should be sent
1958          */
1959         if (visorchipset_holdchipsetready &&
1960             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1961                 if (check_chipset_events() == 1) {
1962                         controlvm_respond(&g_chipset_msg_hdr, 0);
1963                         clear_chipset_events();
1964                         memset(&g_chipset_msg_hdr, 0,
1965                                sizeof(struct controlvm_message_header));
1966                 }
1967         }
1968
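        /* drain and discard anything already queued on the response queue */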
1969         while (visorchannel_signalremove(controlvm_channel,
1970                                          CONTROLVM_QUEUE_RESPONSE,
1971                                          &inmsg))
1972                 ;
1973         if (!got_command) {
1974                 if (controlvm_pending_msg_valid) {
                        /* we throttled processing of a prior
                         * msg, so try to process it again
                         * rather than reading a new one
                         */
1979                         inmsg = controlvm_pending_msg;
1980                         controlvm_pending_msg_valid = false;
1981                         got_command = true;
1982                 } else {
1983                         got_command = read_controlvm_event(&inmsg);
1984                 }
1985         }
1986
1987         handle_command_failed = false;
1988         while (got_command && (!handle_command_failed)) {
1989                 most_recent_message_jiffies = jiffies;
1990                 if (handle_command(inmsg,
1991                                    visorchannel_get_physaddr
1992                                    (controlvm_channel)))
1993                         got_command = read_controlvm_event(&inmsg);
1994                 else {
                        /* this is a scenario where throttling
                         * is required, but probably NOT an
                         * error; we stash the current
                         * controlvm msg so we will attempt to
                         * reprocess it on our next loop
                         */
2001                         handle_command_failed = true;
2002                         controlvm_pending_msg = inmsg;
2003                         controlvm_pending_msg_valid = true;
2004                 }
2005         }
2006
2007         /* parahotplug_worker */
2008         parahotplug_process_list();
2009
2010 cleanup:
2011
2012         if (time_after(jiffies,
2013                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
                /* it's been longer than MIN_IDLE_SECONDS since we
                 * processed our last controlvm message; slow down the
                 * polling
                 */
2018                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2019                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2020         } else {
2021                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2022                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2023         }
2024
2025         queue_delayed_work(periodic_controlvm_workqueue,
2026                            &periodic_controlvm_work, poll_jiffies);
2027 }
2028
2029 static void
2030 setup_crash_devices_work_queue(struct work_struct *work)
2031 {
2032         struct controlvm_message local_crash_bus_msg;
2033         struct controlvm_message local_crash_dev_msg;
2034         struct controlvm_message msg;
2035         u32 local_crash_msg_offset;
2036         u16 local_crash_msg_count;
2037
2038         /* make sure visorbus is registered for controlvm callbacks */
2039         if (visorchipset_visorbusregwait && !visorbusregistered)
2040                 goto cleanup;
2041
2042         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2043
2044         /* send init chipset msg */
2045         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2046         msg.cmd.init_chipset.bus_count = 23;
2047         msg.cmd.init_chipset.switch_count = 0;
2048
2049         chipset_init(&msg);
2050
2051         /* get saved message count */
2052         if (visorchannel_read(controlvm_channel,
2053                               offsetof(struct spar_controlvm_channel_protocol,
2054                                        saved_crash_message_count),
2055                               &local_crash_msg_count, sizeof(u16)) < 0) {
2056                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2057                                  POSTCODE_SEVERITY_ERR);
2058                 return;
2059         }
2060
2061         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2062                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2063                                  local_crash_msg_count,
2064                                  POSTCODE_SEVERITY_ERR);
2065                 return;
2066         }
2067
2068         /* get saved crash message offset */
2069         if (visorchannel_read(controlvm_channel,
2070                               offsetof(struct spar_controlvm_channel_protocol,
2071                                        saved_crash_message_offset),
2072                               &local_crash_msg_offset, sizeof(u32)) < 0) {
2073                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2074                                  POSTCODE_SEVERITY_ERR);
2075                 return;
2076         }
2077
2078         /* read create device message for storage bus offset */
2079         if (visorchannel_read(controlvm_channel,
2080                               local_crash_msg_offset,
2081                               &local_crash_bus_msg,
2082                               sizeof(struct controlvm_message)) < 0) {
2083                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2084                                  POSTCODE_SEVERITY_ERR);
2085                 return;
2086         }
2087
2088         /* read create device message for storage device */
2089         if (visorchannel_read(controlvm_channel,
2090                               local_crash_msg_offset +
2091                               sizeof(struct controlvm_message),
2092                               &local_crash_dev_msg,
2093                               sizeof(struct controlvm_message)) < 0) {
2094                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2095                                  POSTCODE_SEVERITY_ERR);
2096                 return;
2097         }
2098
2099         /* reuse IOVM create bus message */
2100         if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2101                 bus_create(&local_crash_bus_msg);
2102         } else {
2103                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2104                                  POSTCODE_SEVERITY_ERR);
2105                 return;
2106         }
2107
2108         /* reuse create device message for storage device */
2109         if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2110                 my_device_create(&local_crash_dev_msg);
2111         } else {
2112                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2113                                  POSTCODE_SEVERITY_ERR);
2114                 return;
2115         }
2116         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2117         return;
2118
2119 cleanup:
2120
2121         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2122
2123         queue_delayed_work(periodic_controlvm_workqueue,
2124                            &periodic_controlvm_work, poll_jiffies);
2125 }
2126
2127 static void
2128 bus_create_response(struct visorchipset_bus_info *bus_info, int response)
2129 {
2130         bus_responder(CONTROLVM_BUS_CREATE, bus_info, response);
2131 }
2132
2133 static void
2134 bus_destroy_response(struct visorchipset_bus_info *bus_info, int response)
2135 {
2136         bus_responder(CONTROLVM_BUS_DESTROY, bus_info, response);
2137 }
2138
2139 static void
2140 device_create_response(struct visorchipset_device_info *dev_info, int response)
2141 {
2142         device_responder(CONTROLVM_DEVICE_CREATE, dev_info, response);
2143 }
2144
2145 static void
2146 device_destroy_response(struct visorchipset_device_info *dev_info, int response)
2147 {
2148         device_responder(CONTROLVM_DEVICE_DESTROY, dev_info, response);
2149 }
2150
2151 static void
2152 visorchipset_device_pause_response(struct visorchipset_device_info *dev_info,
2153                                    int response)
2154 {
2155         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2156                                      dev_info, response,
2157                                      segment_state_standby);
2158 }
2159
2160 static void
2161 device_resume_response(struct visorchipset_device_info *dev_info, int response)
2162 {
2163         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2164                                      dev_info, response,
2165                                      segment_state_running);
2166 }
2167
2168 bool
2169 visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2170 {
2171         void *p = bus_find(&bus_info_list, bus_no);
2172
2173         if (!p)
2174                 return false;
2175         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2176         return true;
2177 }
2178 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2179
2180 bool
2181 visorchipset_set_bus_context(struct visorchipset_bus_info *p, void *context)
2182 {
2183         if (!p)
2184                 return false;
2185         p->bus_driver_context = context;
2186         return true;
2187 }
2188 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2189
2190 bool
2191 visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2192                              struct visorchipset_device_info *dev_info)
2193 {
2194         void *p = device_find(&dev_info_list, bus_no, dev_no);
2195
2196         if (!p)
2197                 return false;
2198         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2199         return true;
2200 }
2201 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2202
2203 bool
2204 visorchipset_set_device_context(struct visorchipset_device_info *p,
2205                                 void *context)
2206 {
2207         if (!p)
2208                 return false;
2209         p->bus_driver_context = context;
2210         return true;
2211 }
2212 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2213
2214 static ssize_t chipsetready_store(struct device *dev,
2215                                   struct device_attribute *attr,
2216                                   const char *buf, size_t count)
2217 {
2218         char msgtype[64];
2219
2220         if (sscanf(buf, "%63s", msgtype) != 1)
2221                 return -EINVAL;
2222
2223         if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2224                 chipset_events[0] = 1;
2225                 return count;
2226         } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2227                 chipset_events[1] = 1;
2228                 return count;
2229         }
2230         return -EINVAL;
2231 }
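/*
 * Illustration only: a guest boot script might report the two events
 * above with writes like the following.  The attribute lives somewhere
 * under the /sys/devices/platform/visorchipset hierarchy; the exact
 * subdirectory depends on the attribute group it is registered in:
 *
 *   echo MODULES_LOADED       > .../chipsetready
 *   echo CALLHOMEDISK_MOUNTED > .../chipsetready
 */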
2232
2233 /* The parahotplug/devicedisabled interface gets called by our support script
2234  * when an SR-IOV device has been shut down. The ID is passed to the script
2235  * and then passed back when the device has been removed.
2236  */
2237 static ssize_t devicedisabled_store(struct device *dev,
2238                                     struct device_attribute *attr,
2239                                     const char *buf, size_t count)
2240 {
2241         unsigned int id;
2242
2243         if (kstrtouint(buf, 10, &id))
2244                 return -EINVAL;
2245
2246         parahotplug_request_complete(id, 0);
2247         return count;
2248 }
2249
2250 /* The parahotplug/deviceenabled interface gets called by our support script
2251  * when an SR-IOV device has been recovered. The ID is passed to the script
2252  * and then passed back when the device has been brought back up.
2253  */
2254 static ssize_t deviceenabled_store(struct device *dev,
2255                                    struct device_attribute *attr,
2256                                    const char *buf, size_t count)
2257 {
2258         unsigned int id;
2259
2260         if (kstrtouint(buf, 10, &id))
2261                 return -EINVAL;
2262
2263         parahotplug_request_complete(id, 1);
2264         return count;
2265 }
2266
2267 static int
2268 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2269 {
2270         unsigned long physaddr = 0;
2271         unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2272         u64 addr = 0;
2273
2275         if (offset & (PAGE_SIZE - 1))
2276                 return -ENXIO;  /* need aligned offsets */
2277
2278         switch (offset) {
2279         case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2280                 vma->vm_flags |= VM_IO;
2281                 if (!*file_controlvm_channel)
2282                         return -ENXIO;
2283
2284                 visorchannel_read(*file_controlvm_channel,
2285                         offsetof(struct spar_controlvm_channel_protocol,
2286                                  gp_control_channel),
2287                         &addr, sizeof(addr));
2288                 if (!addr)
2289                         return -ENXIO;
2290
2291                 physaddr = (unsigned long)addr;
2292                 if (remap_pfn_range(vma, vma->vm_start,
2293                                     physaddr >> PAGE_SHIFT,
2294                                     vma->vm_end - vma->vm_start,
2295                                     /*pgprot_noncached */
2296                                     (vma->vm_page_prot))) {
2297                         return -EAGAIN;
2298                 }
2299                 break;
2300         default:
2301                 return -ENXIO;
2302         }
2303         return 0;
2304 }
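/*
 * Hypothetical user-space use of the mapping above (sketch only; it
 * assumes a /dev node has been created for the registered major and
 * that the caller knows the channel size):
 *
 *   void *chan = mmap(NULL, chan_bytes, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd,
 *                     VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 */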
2305
2306 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2307 {
2308         u64 result = VMCALL_SUCCESS;
2309         u64 physaddr = 0;
2310
2311         ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2312                         result);
2313         return result;
2314 }
2315
2316 static inline int issue_vmcall_update_physical_time(u64 adjustment)
2317 {
2318         int result = VMCALL_SUCCESS;
2319
2320         ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2321         return result;
2322 }
2323
2324 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2325                                unsigned long arg)
2326 {
2327         s64 adjustment;
2328         s64 vrtc_offset;
2329
2330         switch (cmd) {
2331         case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2332                 /* get the physical rtc offset */
2333                 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2334                 if (copy_to_user((void __user *)arg, &vrtc_offset,
2335                                  sizeof(vrtc_offset))) {
2336                         return -EFAULT;
2337                 }
2338                 return 0;
2339         case VMCALL_UPDATE_PHYSICAL_TIME:
2340                 if (copy_from_user(&adjustment, (void __user *)arg,
2341                                    sizeof(adjustment))) {
2342                         return -EFAULT;
2343                 }
2344                 return issue_vmcall_update_physical_time(adjustment);
        default:
                return -ENOTTY; /* unknown ioctl command */
2347         }
2348 }
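/*
 * Hypothetical user-space caller of the ioctl above (sketch only; it
 * assumes a /dev/visorchipset node has been created for the registered
 * major):
 *
 *   s64 vrtc_offset;
 *   int fd = open("/dev/visorchipset", O_RDWR);
 *
 *   if (fd >= 0 &&
 *       ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
 *             &vrtc_offset) == 0)
 *           printf("vrtc offset: %lld\n", (long long)vrtc_offset);
 */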
2349
2350 static const struct file_operations visorchipset_fops = {
2351         .owner = THIS_MODULE,
2352         .open = visorchipset_open,
2355         .unlocked_ioctl = visorchipset_ioctl,
2356         .release = visorchipset_release,
2357         .mmap = visorchipset_mmap,
2358 };
2359
2360 static int
2361 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2362 {
2363         int rc = 0;
2364
2365         file_controlvm_channel = controlvm_channel;
2366         cdev_init(&file_cdev, &visorchipset_fops);
2367         file_cdev.owner = THIS_MODULE;
2368         if (MAJOR(major_dev) == 0) {
2369                 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2370                 /* dynamic major device number registration required */
2371                 if (rc < 0)
2372                         return rc;
2373         } else {
2374                 /* static major device number registration required */
2375                 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2376                 if (rc < 0)
2377                         return rc;
2378         }
2379         rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2380         if (rc < 0) {
2381                 unregister_chrdev_region(major_dev, 1);
2382                 return rc;
2383         }
2384         return 0;
2385 }
2386
2387 static int
2388 visorchipset_init(struct acpi_device *acpi_device)
2389 {
2390         int rc = 0;
2391         u64 addr;
2392         int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2393         uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2394
2395         addr = controlvm_get_channel_address();
2396         if (!addr)
2397                 return -ENODEV;
2398
2399         memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2400         memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2401
2402         controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
2403                                                           GFP_KERNEL, uuid);
2404         if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2405                     visorchannel_get_header(controlvm_channel))) {
2406                 initialize_controlvm_payload();
2407         } else {
2408                 visorchannel_destroy(controlvm_channel);
2409                 controlvm_channel = NULL;
2410                 return -ENODEV;
2411         }
2412
2413         major_dev = MKDEV(visorchipset_major, 0);
2414         rc = visorchipset_file_init(major_dev, &controlvm_channel);
2415         if (rc < 0) {
2416                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2417                 goto cleanup;
2418         }
2419
2420         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2421
2422         /* if booting in a crash kernel */
2423         if (is_kdump_kernel())
2424                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2425                                   setup_crash_devices_work_queue);
2426         else
2427                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2428                                   controlvm_periodic_work);
2429         periodic_controlvm_workqueue =
2430             create_singlethread_workqueue("visorchipset_controlvm");
2431
2432         if (!periodic_controlvm_workqueue) {
2433                 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2434                                  DIAG_SEVERITY_ERR);
2435                 rc = -ENOMEM;
2436                 goto cleanup;
2437         }
2438         most_recent_message_jiffies = jiffies;
2439         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
        if (!queue_delayed_work(periodic_controlvm_workqueue,
                                &periodic_controlvm_work, poll_jiffies)) {
                POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
                                 DIAG_SEVERITY_ERR);
                rc = -ENOMEM;
                goto cleanup;
        }
2447
2448         visorchipset_platform_device.dev.devt = major_dev;
2449         if (platform_device_register(&visorchipset_platform_device) < 0) {
2450                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
                rc = -ENODEV;
2452                 goto cleanup;
2453         }
2454         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2455
2456         rc = visorbus_init();
2457 cleanup:
2458         if (rc) {
2459                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2460                                  POSTCODE_SEVERITY_ERR);
2461         }
2462         return rc;
2463 }
2464
2465 static void
2466 visorchipset_file_cleanup(dev_t major_dev)
2467 {
2468         if (file_cdev.ops)
2469                 cdev_del(&file_cdev);
2470         file_cdev.ops = NULL;
2471         unregister_chrdev_region(major_dev, 1);
2472 }
2473
2474 static int
2475 visorchipset_exit(struct acpi_device *acpi_device)
2476 {
2477         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2478
2479         visorbus_exit();
2480
2481         cancel_delayed_work(&periodic_controlvm_work);
2482         flush_workqueue(periodic_controlvm_workqueue);
2483         destroy_workqueue(periodic_controlvm_workqueue);
2484         periodic_controlvm_workqueue = NULL;
2485         destroy_controlvm_payload_info(&controlvm_payload_info);
2486
2487         cleanup_controlvm_structures();
2488
2489         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2490
2491         visorchannel_destroy(controlvm_channel);
2492
2493         visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2494         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2495
2496         return 0;
2497 }
2498
2499 static const struct acpi_device_id unisys_device_ids[] = {
2500         {"PNP0A07", 0},
2501         {"", 0},
2502 };
2503
2504 static struct acpi_driver unisys_acpi_driver = {
2505         .name = "unisys_acpi",
2506         .class = "unisys_acpi_class",
2507         .owner = THIS_MODULE,
2508         .ids = unisys_device_ids,
2509         .ops = {
2510                 .add = visorchipset_init,
2511                 .remove = visorchipset_exit,
2512                 },
2513 };

static uint32_t __init visorutil_spar_detect(void)
2515 {
2516         unsigned int eax, ebx, ecx, edx;
2517
2518         if (cpu_has_hypervisor) {
2519                 /* check the ID */
2520                 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2521                 return  (ebx == UNISYS_SPAR_ID_EBX) &&
2522                         (ecx == UNISYS_SPAR_ID_ECX) &&
2523                         (edx == UNISYS_SPAR_ID_EDX);
2524         } else {
2525                 return 0;
2526         }
2527 }
2528
2529 static int init_unisys(void)
2530 {
        int result;

        if (!visorutil_spar_detect())
2533                 return -ENODEV;
2534
2535         result = acpi_bus_register_driver(&unisys_acpi_driver);
2536         if (result)
2537                 return -ENODEV;
2538
2539         pr_info("Unisys Visorchipset Driver Loaded.\n");
2540         return 0;
}
2542
2543 static void exit_unisys(void)
2544 {
2545         acpi_bus_unregister_driver(&unisys_acpi_driver);
2546 }
2547
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(major,
                 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorbusregwait,
                 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
                   int, S_IRUGO);
MODULE_PARM_DESC(holdchipsetready,
                 "1 to hold response to CHIPSET_READY");
2558
2559 module_init(init_unisys);
2560 module_exit(exit_unisys);
2561
2562 MODULE_AUTHOR("Unisys");
2563 MODULE_LICENSE("GPL");
2564 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2565                    VERSION);
2566 MODULE_VERSION(VERSION);