/*
 * Source: drivers/staging/unisys/visorchipset/visorchipset_main.c
 * (karo-tx-linux.git; commit subject: "staging: unisys: fix kdump support")
 */
1 /* visorchipset_main.c
2  *
3  * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4  * All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  * NON INFRINGEMENT.  See the GNU General Public License for more
15  * details.
16  */
17
18 #include "globals.h"
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
23 #include "file.h"
24 #include "parser.h"
25 #include "uisutils.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
28
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
33 #include <linux/crash_dump.h>
34
35 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36 #define TEST_VNIC_PHYSITF "eth0"        /* physical network itf for
37                                          * vnic loopback test */
38 #define TEST_VNIC_SWITCHNO 1
39 #define TEST_VNIC_BUSNO 9
40
41 #define MAX_NAME_SIZE 128
42 #define MAX_IP_SIZE   50
43 #define MAXOUTSTANDINGCHANNELCOMMAND 256
44 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
45 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
47 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
48 * we switch to slow polling mode.  As soon as we get a controlvm
49 * message, we switch back to fast polling mode.
50 */
51 #define MIN_IDLE_SECONDS 10
52 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
53 static ulong most_recent_message_jiffies;       /* when we got our last
54                                                  * controlvm message */
/* Return @s unchanged, or the empty string if @s is NULL. */
static inline char *
NONULLSTR(char *s)
{
	return s ? s : "";
}
62
63 static int serverregistered;
64 static int clientregistered;
65
66 #define MAX_CHIPSET_EVENTS 2
67 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
68
69 static struct delayed_work periodic_controlvm_work;
70 static struct workqueue_struct *periodic_controlvm_workqueue;
71 static DEFINE_SEMAPHORE(notifier_lock);
72
73 static struct controlvm_message_header g_diag_msg_hdr;
74 static struct controlvm_message_header g_chipset_msg_hdr;
75 static struct controlvm_message_header g_del_dump_msg_hdr;
76 static const uuid_le spar_diag_pool_channel_protocol_uuid =
77         SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
78 /* 0xffffff is an invalid Bus/Device number */
79 static ulong g_diagpool_bus_no = 0xffffff;
80 static ulong g_diagpool_dev_no = 0xffffff;
81 static struct controlvm_message_packet g_devicechangestate_packet;
82
83 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
84  * "visorhackbus")
85  */
86 #define FOR_VISORHACKBUS(channel_type_guid) \
87         (((uuid_le_cmp(channel_type_guid,\
88                        spar_vnic_channel_protocol_uuid) == 0) ||\
89         (uuid_le_cmp(channel_type_guid,\
90                         spar_vhba_channel_protocol_uuid) == 0)))
91 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
92
93 #define is_diagpool_channel(channel_type_guid) \
94         (uuid_le_cmp(channel_type_guid,\
95                      spar_diag_pool_channel_protocol_uuid) == 0)
96
97 static LIST_HEAD(bus_info_list);
98 static LIST_HEAD(dev_info_list);
99
100 static struct visorchannel *controlvm_channel;
101
102 /* Manages the request payload in the controlvm channel */
103 static struct controlvm_payload_info {
104         u8 __iomem *ptr;        /* pointer to base address of payload pool */
105         u64 offset;             /* offset from beginning of controlvm
106                                  * channel to beginning of payload * pool */
107         u32 bytes;              /* number of bytes in payload pool */
108 } controlvm_payload_info;
109
110 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
111  * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
112  */
113 static struct livedump_info {
114         struct controlvm_message_header dumpcapture_header;
115         struct controlvm_message_header gettextdump_header;
116         struct controlvm_message_header dumpcomplete_header;
117         BOOL gettextdump_outstanding;
118         u32 crc32;
119         ulong length;
120         atomic_t buffers_in_use;
121         ulong destination;
122 } livedump_info;
123
124 /* The following globals are used to handle the scenario where we are unable to
125  * offload the payload from a controlvm message due to memory requirements.  In
126  * this scenario, we simply stash the controlvm message, then attempt to
127  * process it again the next time controlvm_periodic_work() runs.
128  */
129 static struct controlvm_message controlvm_pending_msg;
130 static BOOL controlvm_pending_msg_valid = FALSE;
131
132 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
133  * TRANSMIT_FILE PutFile payloads.
134  */
135 static struct kmem_cache *putfile_buffer_list_pool;
136 static const char putfile_buffer_list_pool_name[] =
137         "controlvm_putfile_buffer_list_pool";
138
139 /* This identifies a data buffer that has been received via a controlvm messages
140  * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
141  */
142 struct putfile_buffer_entry {
143         struct list_head next;  /* putfile_buffer_entry list */
144         struct parser_context *parser_ctx; /* points to input data buffer */
145 };
146
147 /* List of struct putfile_request *, via next_putfile_request member.
148  * Each entry in this list identifies an outstanding TRANSMIT_FILE
149  * conversation.
150  */
151 static LIST_HEAD(putfile_request_list);
152
153 /* This describes a buffer and its current state of transfer (e.g., how many
154  * bytes have already been supplied as putfile data, and how many bytes are
155  * remaining) for a putfile_request.
156  */
157 struct putfile_active_buffer {
158         /* a payload from a controlvm message, containing a file data buffer */
159         struct parser_context *parser_ctx;
160         /* points within data area of parser_ctx to next byte of data */
161         u8 *pnext;
162         /* # bytes left from <pnext> to the end of this data buffer */
163         size_t bytes_remaining;
164 };
165
166 #define PUTFILE_REQUEST_SIG 0x0906101302281211
167 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
168  * conversation.  Structs of this type are dynamically linked into
169  * <Putfile_request_list>.
170  */
171 struct putfile_request {
172         u64 sig;                /* PUTFILE_REQUEST_SIG */
173
174         /* header from original TransmitFile request */
175         struct controlvm_message_header controlvm_header;
176         u64 file_request_number;        /* from original TransmitFile request */
177
178         /* link to next struct putfile_request */
179         struct list_head next_putfile_request;
180
181         /* most-recent sequence number supplied via a controlvm message */
182         u64 data_sequence_number;
183
184         /* head of putfile_buffer_entry list, which describes the data to be
185          * supplied as putfile data;
186          * - this list is added to when controlvm messages come in that supply
187          * file data
188          * - this list is removed from via the hotplug program that is actually
189          * consuming these buffers to write as file data */
190         struct list_head input_buffer_list;
191         spinlock_t req_list_lock;       /* lock for input_buffer_list */
192
193         /* waiters for input_buffer_list to go non-empty */
194         wait_queue_head_t input_buffer_wq;
195
196         /* data not yet read within current putfile_buffer_entry */
197         struct putfile_active_buffer active_buf;
198
199         /* <0 = failed, 0 = in-progress, >0 = successful; */
200         /* note that this must be set with req_list_lock, and if you set <0, */
201         /* it is your responsibility to also free up all of the other objects */
202         /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
203         /* before releasing the lock */
204         int completion_status;
205 };
206
207 static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
208
/* One outstanding parahotplug request, queued on parahotplug_request_list
 * until processed (see parahotplug_process_list()).
 */
struct parahotplug_request {
	struct list_head list;		/* link in parahotplug_request_list */
	int id;				/* identifier for this request */
	unsigned long expiration;	/* deadline after which the request is
					 * stale — presumably jiffies-based;
					 * TODO confirm against users */
	struct controlvm_message msg;	/* the originating controlvm message */
};
215
216 static LIST_HEAD(parahotplug_request_list);
217 static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
218 static void parahotplug_process_list(void);
219
220 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
221  * CONTROLVM_REPORTEVENT.
222  */
223 static struct visorchipset_busdev_notifiers busdev_server_notifiers;
224 static struct visorchipset_busdev_notifiers busdev_client_notifiers;
225
226 static void bus_create_response(ulong bus_no, int response);
227 static void bus_destroy_response(ulong bus_no, int response);
228 static void device_create_response(ulong bus_no, ulong dev_no, int response);
229 static void device_destroy_response(ulong bus_no, ulong dev_no, int response);
230 static void device_resume_response(ulong bus_no, ulong dev_no, int response);
231
232 static struct visorchipset_busdev_responders busdev_responders = {
233         .bus_create = bus_create_response,
234         .bus_destroy = bus_destroy_response,
235         .device_create = device_create_response,
236         .device_destroy = device_destroy_response,
237         .device_pause = visorchipset_device_pause_response,
238         .device_resume = device_resume_response,
239 };
240
241 /* info for /dev/visorchipset */
242 static dev_t major_dev = -1; /**< indicates major num for device */
243
244 /* prototypes for attributes */
245 static ssize_t toolaction_show(struct device *dev,
246                                struct device_attribute *attr, char *buf);
247 static ssize_t toolaction_store(struct device *dev,
248                                 struct device_attribute *attr,
249                                 const char *buf, size_t count);
250 static DEVICE_ATTR_RW(toolaction);
251
252 static ssize_t boottotool_show(struct device *dev,
253                                struct device_attribute *attr, char *buf);
254 static ssize_t boottotool_store(struct device *dev,
255                                 struct device_attribute *attr, const char *buf,
256                                 size_t count);
257 static DEVICE_ATTR_RW(boottotool);
258
259 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
260                           char *buf);
261 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
262                            const char *buf, size_t count);
263 static DEVICE_ATTR_RW(error);
264
265 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
266                            char *buf);
267 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
268                             const char *buf, size_t count);
269 static DEVICE_ATTR_RW(textid);
270
271 static ssize_t remaining_steps_show(struct device *dev,
272                                     struct device_attribute *attr, char *buf);
273 static ssize_t remaining_steps_store(struct device *dev,
274                                      struct device_attribute *attr,
275                                      const char *buf, size_t count);
276 static DEVICE_ATTR_RW(remaining_steps);
277
278 static ssize_t chipsetready_store(struct device *dev,
279                                   struct device_attribute *attr,
280                                   const char *buf, size_t count);
281 static DEVICE_ATTR_WO(chipsetready);
282
283 static ssize_t devicedisabled_store(struct device *dev,
284                                     struct device_attribute *attr,
285                                     const char *buf, size_t count);
286 static DEVICE_ATTR_WO(devicedisabled);
287
288 static ssize_t deviceenabled_store(struct device *dev,
289                                    struct device_attribute *attr,
290                                    const char *buf, size_t count);
291 static DEVICE_ATTR_WO(deviceenabled);
292
293 static struct attribute *visorchipset_install_attrs[] = {
294         &dev_attr_toolaction.attr,
295         &dev_attr_boottotool.attr,
296         &dev_attr_error.attr,
297         &dev_attr_textid.attr,
298         &dev_attr_remaining_steps.attr,
299         NULL
300 };
301
302 static struct attribute_group visorchipset_install_group = {
303         .name = "install",
304         .attrs = visorchipset_install_attrs
305 };
306
307 static struct attribute *visorchipset_guest_attrs[] = {
308         &dev_attr_chipsetready.attr,
309         NULL
310 };
311
312 static struct attribute_group visorchipset_guest_group = {
313         .name = "guest",
314         .attrs = visorchipset_guest_attrs
315 };
316
317 static struct attribute *visorchipset_parahotplug_attrs[] = {
318         &dev_attr_devicedisabled.attr,
319         &dev_attr_deviceenabled.attr,
320         NULL
321 };
322
323 static struct attribute_group visorchipset_parahotplug_group = {
324         .name = "parahotplug",
325         .attrs = visorchipset_parahotplug_attrs
326 };
327
328 static const struct attribute_group *visorchipset_dev_groups[] = {
329         &visorchipset_install_group,
330         &visorchipset_guest_group,
331         &visorchipset_parahotplug_group,
332         NULL
333 };
334
335 /* /sys/devices/platform/visorchipset */
336 static struct platform_device visorchipset_platform_device = {
337         .name = "visorchipset",
338         .id = -1,
339         .dev.groups = visorchipset_dev_groups,
340 };
341
342 /* Function prototypes */
343 static void controlvm_respond(struct controlvm_message_header *msg_hdr,
344                               int response);
345 static void controlvm_respond_chipset_init(
346                 struct controlvm_message_header *msg_hdr, int response,
347                 enum ultra_chipset_feature features);
348 static void controlvm_respond_physdev_changestate(
349                 struct controlvm_message_header *msg_hdr, int response,
350                 struct spar_segment_state state);
351
352 static ssize_t toolaction_show(struct device *dev,
353                                struct device_attribute *attr,
354                                char *buf)
355 {
356         u8 tool_action;
357
358         visorchannel_read(controlvm_channel,
359                 offsetof(struct spar_controlvm_channel_protocol,
360                          tool_action), &tool_action, sizeof(u8));
361         return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
362 }
363
364 static ssize_t toolaction_store(struct device *dev,
365                                 struct device_attribute *attr,
366                                 const char *buf, size_t count)
367 {
368         u8 tool_action;
369         int ret;
370
371         if (kstrtou8(buf, 10, &tool_action) != 0)
372                 return -EINVAL;
373
374         ret = visorchannel_write(controlvm_channel,
375                 offsetof(struct spar_controlvm_channel_protocol,
376                          tool_action),
377                 &tool_action, sizeof(u8));
378
379         if (ret)
380                 return ret;
381         return count;
382 }
383
384 static ssize_t boottotool_show(struct device *dev,
385                                struct device_attribute *attr,
386                                char *buf)
387 {
388         struct efi_spar_indication efi_spar_indication;
389
390         visorchannel_read(controlvm_channel,
391                           offsetof(struct spar_controlvm_channel_protocol,
392                                    efi_spar_ind), &efi_spar_indication,
393                           sizeof(struct efi_spar_indication));
394         return scnprintf(buf, PAGE_SIZE, "%u\n",
395                          efi_spar_indication.boot_to_tool);
396 }
397
398 static ssize_t boottotool_store(struct device *dev,
399                                 struct device_attribute *attr,
400                                 const char *buf, size_t count)
401 {
402         int val, ret;
403         struct efi_spar_indication efi_spar_indication;
404
405         if (kstrtoint(buf, 10, &val) != 0)
406                 return -EINVAL;
407
408         efi_spar_indication.boot_to_tool = val;
409         ret = visorchannel_write(controlvm_channel,
410                         offsetof(struct spar_controlvm_channel_protocol,
411                                  efi_spar_ind), &(efi_spar_indication),
412                                  sizeof(struct efi_spar_indication));
413
414         if (ret)
415                 return ret;
416         return count;
417 }
418
419 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
420                           char *buf)
421 {
422         u32 error;
423
424         visorchannel_read(controlvm_channel,
425                           offsetof(struct spar_controlvm_channel_protocol,
426                                    installation_error),
427                           &error, sizeof(u32));
428         return scnprintf(buf, PAGE_SIZE, "%i\n", error);
429 }
430
431 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
432                            const char *buf, size_t count)
433 {
434         u32 error;
435         int ret;
436
437         if (kstrtou32(buf, 10, &error) != 0)
438                 return -EINVAL;
439
440         ret = visorchannel_write(controlvm_channel,
441                 offsetof(struct spar_controlvm_channel_protocol,
442                          installation_error),
443                 &error, sizeof(u32));
444         if (ret)
445                 return ret;
446         return count;
447 }
448
449 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
450                            char *buf)
451 {
452         u32 text_id;
453
454         visorchannel_read(controlvm_channel,
455                           offsetof(struct spar_controlvm_channel_protocol,
456                                    installation_text_id),
457                           &text_id, sizeof(u32));
458         return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
459 }
460
461 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
462                             const char *buf, size_t count)
463 {
464         u32 text_id;
465         int ret;
466
467         if (kstrtou32(buf, 10, &text_id) != 0)
468                 return -EINVAL;
469
470         ret = visorchannel_write(controlvm_channel,
471                 offsetof(struct spar_controlvm_channel_protocol,
472                          installation_text_id),
473                 &text_id, sizeof(u32));
474         if (ret)
475                 return ret;
476         return count;
477 }
478
479 static ssize_t remaining_steps_show(struct device *dev,
480                                     struct device_attribute *attr, char *buf)
481 {
482         u16 remaining_steps;
483
484         visorchannel_read(controlvm_channel,
485                           offsetof(struct spar_controlvm_channel_protocol,
486                                    installation_remaining_steps),
487                           &remaining_steps, sizeof(u16));
488         return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
489 }
490
491 static ssize_t remaining_steps_store(struct device *dev,
492                                      struct device_attribute *attr,
493                                      const char *buf, size_t count)
494 {
495         u16 remaining_steps;
496         int ret;
497
498         if (kstrtou16(buf, 10, &remaining_steps) != 0)
499                 return -EINVAL;
500
501         ret = visorchannel_write(controlvm_channel,
502                 offsetof(struct spar_controlvm_channel_protocol,
503                          installation_remaining_steps),
504                 &remaining_steps, sizeof(u16));
505         if (ret)
506                 return ret;
507         return count;
508 }
509
510 static void
511 bus_info_clear(void *v)
512 {
513         struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
514
515         kfree(p->name);
516         p->name = NULL;
517
518         kfree(p->description);
519         p->description = NULL;
520
521         p->state.created = 0;
522         memset(p, 0, sizeof(struct visorchipset_bus_info));
523 }
524
525 static void
526 dev_info_clear(void *v)
527 {
528         struct visorchipset_device_info *p =
529                         (struct visorchipset_device_info *)(v);
530
531         p->state.created = 0;
532         memset(p, 0, sizeof(struct visorchipset_device_info));
533 }
534
535 static u8
536 check_chipset_events(void)
537 {
538         int i;
539         u8 send_msg = 1;
540         /* Check events to determine if response should be sent */
541         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
542                 send_msg &= chipset_events[i];
543         return send_msg;
544 }
545
546 static void
547 clear_chipset_events(void)
548 {
549         int i;
550         /* Clear chipset_events */
551         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
552                 chipset_events[i] = 0;
553 }
554
555 void
556 visorchipset_register_busdev_server(
557                         struct visorchipset_busdev_notifiers *notifiers,
558                         struct visorchipset_busdev_responders *responders,
559                         struct ultra_vbus_deviceinfo *driver_info)
560 {
561         down(&notifier_lock);
562         if (!notifiers) {
563                 memset(&busdev_server_notifiers, 0,
564                        sizeof(busdev_server_notifiers));
565                 serverregistered = 0;   /* clear flag */
566         } else {
567                 busdev_server_notifiers = *notifiers;
568                 serverregistered = 1;   /* set flag */
569         }
570         if (responders)
571                 *responders = busdev_responders;
572         if (driver_info)
573                 bus_device_info_init(driver_info, "chipset", "visorchipset",
574                                      VERSION, NULL);
575
576         up(&notifier_lock);
577 }
578 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
579
580 void
581 visorchipset_register_busdev_client(
582                         struct visorchipset_busdev_notifiers *notifiers,
583                         struct visorchipset_busdev_responders *responders,
584                         struct ultra_vbus_deviceinfo *driver_info)
585 {
586         down(&notifier_lock);
587         if (!notifiers) {
588                 memset(&busdev_client_notifiers, 0,
589                        sizeof(busdev_client_notifiers));
590                 clientregistered = 0;   /* clear flag */
591         } else {
592                 busdev_client_notifiers = *notifiers;
593                 clientregistered = 1;   /* set flag */
594         }
595         if (responders)
596                 *responders = busdev_responders;
597         if (driver_info)
598                 bus_device_info_init(driver_info, "chipset(bolts)",
599                                      "visorchipset", VERSION, NULL);
600         up(&notifier_lock);
601 }
602 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
603
/* Free every tracked bus and device entry; called when chipset
 * initialization fails (see chipset_init()).
 */
static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	/* _safe iteration: entries are unlinked and freed as we walk */
	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);	/* frees name/description strings */
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}
622
/* Handle the CONTROLVM_CHIPSET_INIT message: mark the chipset as
 * initialized (allowed only once), negotiate feature bits with Command,
 * and send a response if one was requested.
 */
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;	/* guards against a second INIT */
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		/* duplicate INIT: error response with features still 0 */
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it). */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver. */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
654
655 static void
656 controlvm_init_response(struct controlvm_message *msg,
657                         struct controlvm_message_header *msg_hdr, int response)
658 {
659         memset(msg, 0, sizeof(struct controlvm_message));
660         memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
661         msg->hdr.payload_bytes = 0;
662         msg->hdr.payload_vm_offset = 0;
663         msg->hdr.payload_max_bytes = 0;
664         if (response < 0) {
665                 msg->hdr.flags.failed = 1;
666                 msg->hdr.completion_status = (u32) (-response);
667         }
668 }
669
/* Send a response for @msg_hdr back on the controlvm request queue,
 * unless the original message was flagged as a test message.
 */
static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	* back the deviceChangeState structure in the packet. */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;		/* test messages get no response */

	/* NOTE(review): the signalinsert result is effectively discarded;
	 * this if/return at the end of the function is a no-op.
	 */
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
692
693 static void
694 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
695                                int response,
696                                enum ultra_chipset_feature features)
697 {
698         struct controlvm_message outmsg;
699
700         controlvm_init_response(&outmsg, msg_hdr, response);
701         outmsg.cmd.init_chipset.features = features;
702         if (!visorchannel_signalinsert(controlvm_channel,
703                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
704                 return;
705         }
706 }
707
708 static void controlvm_respond_physdev_changestate(
709                 struct controlvm_message_header *msg_hdr, int response,
710                 struct spar_segment_state state)
711 {
712         struct controlvm_message outmsg;
713
714         controlvm_init_response(&outmsg, msg_hdr, response);
715         outmsg.cmd.device_change_state.state = state;
716         outmsg.cmd.device_change_state.flags.phys_device = 1;
717         if (!visorchannel_signalinsert(controlvm_channel,
718                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
719                 return;
720         }
721 }
722
/* Stash a controlvm message into the channel's saved-crash-message area
 * so it can be replayed after a crash — presumably by the kdump kernel,
 * given the linux/crash_dump.h include; TODO confirm against the
 * channel-protocol documentation.
 * @msg:  message to save
 * @type: CRASH_BUS selects the first slot; any other value selects the
 *        slot immediately following it
 * Errors are reported via POSTCODE only; the function returns void.
 */
void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 crash_msg_offset;
	u16 crash_msg_count;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* the channel must advertise exactly the expected slot count */
	if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		/* non-bus message goes in the slot after the bus message */
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);
778
/* Complete a bus-related controlvm command for @bus_no: update the
 * tracked bus state and, if a matching response is pending, send it and
 * clear the pending marker.
 */
static void
bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
{
	struct visorchipset_bus_info *p = NULL;
	BOOL need_clear = FALSE;

	p = findbus(&bus_info_list, bus_no);
	if (!p)
		return;		/* unknown bus: nothing to complete */

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			delbusdevices(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = TRUE;	/* clear only after responding */
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;		/* pending response is for another command */
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		/* successful BUS_DESTROY: drop the bus and its devices */
		bus_info_clear(p);
		delbusdevices(&dev_info_list, bus_no);
	}
}
812
813 static void
814 device_changestate_responder(enum controlvm_id cmd_id,
815                              ulong bus_no, ulong dev_no, int response,
816                              struct spar_segment_state response_state)
817 {
818         struct visorchipset_device_info *p = NULL;
819         struct controlvm_message outmsg;
820
821         p = finddevice(&dev_info_list, bus_no, dev_no);
822         if (!p)
823                 return;
824         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
825                 return;         /* no controlvm response needed */
826         if (p->pending_msg_hdr.id != cmd_id)
827                 return;
828
829         controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
830
831         outmsg.cmd.device_change_state.bus_no = bus_no;
832         outmsg.cmd.device_change_state.dev_no = dev_no;
833         outmsg.cmd.device_change_state.state = response_state;
834
835         if (!visorchannel_signalinsert(controlvm_channel,
836                                        CONTROLVM_QUEUE_REQUEST, &outmsg))
837                 return;
838
839         p->pending_msg_hdr.id = CONTROLVM_INVALID;
840 }
841
842 static void
843 device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
844                  int response)
845 {
846         struct visorchipset_device_info *p = NULL;
847         BOOL need_clear = FALSE;
848
849         p = finddevice(&dev_info_list, bus_no, dev_no);
850         if (!p)
851                 return;
852         if (response >= 0) {
853                 if (cmd_id == CONTROLVM_DEVICE_CREATE)
854                         p->state.created = 1;
855                 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
856                         need_clear = TRUE;
857         }
858
859         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
860                 return;         /* no controlvm response needed */
861
862         if (p->pending_msg_hdr.id != (u32)cmd_id)
863                 return;
864
865         controlvm_respond(&p->pending_msg_hdr, response);
866         p->pending_msg_hdr.id = CONTROLVM_INVALID;
867         if (need_clear)
868                 dev_info_clear(p);
869 }
870
/* Finish processing a bus create/destroy command: stash the message
 * header if a response is expected, dispatch to the registered
 * server/client bus notifiers under notifier_lock, and fall back to
 * responding directly via bus_responder() when no notifier fired.
 */
static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, BOOL need_response)
{
	BOOL notified = FALSE;

	struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
							 bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		/* remember the request so the eventual responder can
		 * answer it; bus_responder() matches on this header
		 */
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			/* We can't tell from the bus_create
			* information which of our 2 bus flavors the
			* devices on this bus will ultimately end up.
			* FORTUNATELY, it turns out it is harmless to
			* send the bus_create to both of them.  We can
			* narrow things down a little bit, though,
			* because we know: - BusDev_Server can handle
			* either server or client devices
			* - BusDev_Client can handle ONLY client
			* devices */
			if (busdev_server_notifiers.bus_create) {
				(*busdev_server_notifiers.bus_create) (bus_no);
				notified = TRUE;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    busdev_client_notifiers.bus_create) {
				(*busdev_client_notifiers.bus_create) (bus_no);
				notified = TRUE;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_server_notifiers.bus_destroy) {
				(*busdev_server_notifiers.bus_destroy) (bus_no);
				notified = TRUE;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    busdev_client_notifiers.bus_destroy) {
				(*busdev_client_notifiers.bus_destroy) (bus_no);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}
938
/* Finish processing a device create/changestate/destroy command:
 * stash the message header if a response is expected, then under
 * notifier_lock dispatch to the server or client notifier set
 * (selected by for_visorbus).  For CHANGESTATE, the requested segment
 * state is mapped to resume / pause / "lite pause" handling.  If no
 * notifier fired, respond directly via device_responder().
 */
static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      BOOL need_response, BOOL for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers = NULL;
	BOOL notified = FALSE;

	struct visorchipset_device_info *dev_info =
		finddevice(&dev_info_list, bus_no, dev_no);
	/* NOTE(review): the spaces around '=' look suspicious for a
	 * uevent KEY=VALUE environment string -- confirm user space
	 * actually expects "SPARSP_DIAGPOOL_PAUSED_STATE = 1" verbatim
	 */
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	if (for_visorbus)
		notifiers = &busdev_server_notifiers;
	else
		notifiers = &busdev_client_notifiers;
	if (need_response) {
		/* remember the request so device_responder() /
		 * device_changestate_responder() can answer it later
		 */
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = TRUE;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = TRUE;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = TRUE;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}
1035
1036 static void
1037 bus_create(struct controlvm_message *inmsg)
1038 {
1039         struct controlvm_message_packet *cmd = &inmsg->cmd;
1040         ulong bus_no = cmd->create_bus.bus_no;
1041         int rc = CONTROLVM_RESP_SUCCESS;
1042         struct visorchipset_bus_info *bus_info = NULL;
1043
1044         bus_info = findbus(&bus_info_list, bus_no);
1045         if (bus_info && (bus_info->state.created == 1)) {
1046                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1047                                  POSTCODE_SEVERITY_ERR);
1048                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1049                 goto cleanup;
1050         }
1051         bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1052         if (!bus_info) {
1053                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1054                                  POSTCODE_SEVERITY_ERR);
1055                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1056                 goto cleanup;
1057         }
1058
1059         INIT_LIST_HEAD(&bus_info->entry);
1060         bus_info->bus_no = bus_no;
1061         bus_info->dev_no = cmd->create_bus.dev_count;
1062
1063         POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
1064
1065         if (inmsg->hdr.flags.test_message == 1)
1066                 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1067         else
1068                 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1069
1070         bus_info->flags.server = inmsg->hdr.flags.server;
1071         bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1072         bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1073         bus_info->chan_info.channel_type_uuid =
1074                         cmd->create_bus.bus_data_type_uuid;
1075         bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1076
1077         list_add(&bus_info->entry, &bus_info_list);
1078
1079         POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1080
1081 cleanup:
1082         bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1083                    rc, inmsg->hdr.flags.response_expected == 1);
1084 }
1085
1086 static void
1087 bus_destroy(struct controlvm_message *inmsg)
1088 {
1089         struct controlvm_message_packet *cmd = &inmsg->cmd;
1090         ulong bus_no = cmd->destroy_bus.bus_no;
1091         struct visorchipset_bus_info *bus_info;
1092         int rc = CONTROLVM_RESP_SUCCESS;
1093
1094         bus_info = findbus(&bus_info_list, bus_no);
1095         if (!bus_info)
1096                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1097         else if (bus_info->state.created == 0)
1098                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1099
1100         bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1101                    rc, inmsg->hdr.flags.response_expected == 1);
1102 }
1103
1104 static void
1105 bus_configure(struct controlvm_message *inmsg,
1106               struct parser_context *parser_ctx)
1107 {
1108         struct controlvm_message_packet *cmd = &inmsg->cmd;
1109         ulong bus_no = cmd->configure_bus.bus_no;
1110         struct visorchipset_bus_info *bus_info = NULL;
1111         int rc = CONTROLVM_RESP_SUCCESS;
1112         char s[99];
1113
1114         bus_no = cmd->configure_bus.bus_no;
1115         POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1116                          POSTCODE_SEVERITY_INFO);
1117
1118         bus_info = findbus(&bus_info_list, bus_no);
1119         if (!bus_info) {
1120                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1121                                  POSTCODE_SEVERITY_ERR);
1122                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1123         } else if (bus_info->state.created == 0) {
1124                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1125                                  POSTCODE_SEVERITY_ERR);
1126                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1127         } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1128                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1129                                  POSTCODE_SEVERITY_ERR);
1130                 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1131         } else {
1132                 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1133                 bus_info->partition_uuid = parser_id_get(parser_ctx);
1134                 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1135                 bus_info->name = parser_string_get(parser_ctx);
1136
1137                 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1138                 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1139                                  POSTCODE_SEVERITY_INFO);
1140         }
1141         bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1142                    rc, inmsg->hdr.flags.response_expected == 1);
1143 }
1144
1145 static void
1146 my_device_create(struct controlvm_message *inmsg)
1147 {
1148         struct controlvm_message_packet *cmd = &inmsg->cmd;
1149         ulong bus_no = cmd->create_device.bus_no;
1150         ulong dev_no = cmd->create_device.dev_no;
1151         struct visorchipset_device_info *dev_info = NULL;
1152         struct visorchipset_bus_info *bus_info = NULL;
1153         int rc = CONTROLVM_RESP_SUCCESS;
1154
1155         dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1156         if (dev_info && (dev_info->state.created == 1)) {
1157                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1158                                  POSTCODE_SEVERITY_ERR);
1159                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1160                 goto cleanup;
1161         }
1162         bus_info = findbus(&bus_info_list, bus_no);
1163         if (!bus_info) {
1164                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1165                                  POSTCODE_SEVERITY_ERR);
1166                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1167                 goto cleanup;
1168         }
1169         if (bus_info->state.created == 0) {
1170                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1171                                  POSTCODE_SEVERITY_ERR);
1172                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1173                 goto cleanup;
1174         }
1175         dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1176         if (!dev_info) {
1177                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1178                                  POSTCODE_SEVERITY_ERR);
1179                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1180                 goto cleanup;
1181         }
1182
1183         INIT_LIST_HEAD(&dev_info->entry);
1184         dev_info->bus_no = bus_no;
1185         dev_info->dev_no = dev_no;
1186         dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1187         POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
1188                          POSTCODE_SEVERITY_INFO);
1189
1190         if (inmsg->hdr.flags.test_message == 1)
1191                 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1192         else
1193                 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1194         dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1195         dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1196         dev_info->chan_info.channel_type_uuid =
1197                         cmd->create_device.data_type_uuid;
1198         dev_info->chan_info.intr = cmd->create_device.intr;
1199         list_add(&dev_info->entry, &dev_info_list);
1200         POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
1201                          POSTCODE_SEVERITY_INFO);
1202 cleanup:
1203         /* get the bus and devNo for DiagPool channel */
1204         if (dev_info &&
1205             is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1206                 g_diagpool_bus_no = bus_no;
1207                 g_diagpool_dev_no = dev_no;
1208         }
1209         device_epilog(bus_no, dev_no, segment_state_running,
1210                       CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1211                       inmsg->hdr.flags.response_expected == 1,
1212                       FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
1213 }
1214
/* Handle a CONTROLVM_DEVICE_CHANGESTATE message: validate the target
 * device and forward the requested segment state to device_epilog().
 *
 * NOTE(review): when validation fails (rc < 0), device_epilog() is
 * never called, so no response is sent even if the sender expected
 * one -- confirm this is intended.
 */
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->device_change_state.bus_no;
	ulong dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}
1242
1243 static void
1244 my_device_destroy(struct controlvm_message *inmsg)
1245 {
1246         struct controlvm_message_packet *cmd = &inmsg->cmd;
1247         ulong bus_no = cmd->destroy_device.bus_no;
1248         ulong dev_no = cmd->destroy_device.dev_no;
1249         struct visorchipset_device_info *dev_info = NULL;
1250         int rc = CONTROLVM_RESP_SUCCESS;
1251
1252         dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1253         if (!dev_info)
1254                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1255         else if (dev_info->state.created == 0)
1256                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1257
1258         if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1259                 device_epilog(bus_no, dev_no, segment_state_running,
1260                               CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1261                               inmsg->hdr.flags.response_expected == 1,
1262                               FOR_VISORBUS(
1263                                         dev_info->chan_info.channel_type_uuid));
1264 }
1265
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative CONTROLVM_RESP_ERROR_* code on failure.
 */
1272 static int
1273 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1274                                   struct controlvm_payload_info *info)
1275 {
1276         u8 __iomem *payload = NULL;
1277         int rc = CONTROLVM_RESP_SUCCESS;
1278
1279         if (!info) {
1280                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1281                 goto cleanup;
1282         }
1283         memset(info, 0, sizeof(struct controlvm_payload_info));
1284         if ((offset == 0) || (bytes == 0)) {
1285                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1286                 goto cleanup;
1287         }
1288         payload = ioremap_cache(phys_addr + offset, bytes);
1289         if (!payload) {
1290                 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1291                 goto cleanup;
1292         }
1293
1294         info->offset = offset;
1295         info->bytes = bytes;
1296         info->ptr = payload;
1297
1298 cleanup:
1299         if (rc < 0) {
1300                 if (payload) {
1301                         iounmap(payload);
1302                         payload = NULL;
1303                 }
1304         }
1305         return rc;
1306 }
1307
1308 static void
1309 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1310 {
1311         if (info->ptr) {
1312                 iounmap(info->ptr);
1313                 info->ptr = NULL;
1314         }
1315         memset(info, 0, sizeof(struct controlvm_payload_info));
1316 }
1317
1318 static void
1319 initialize_controlvm_payload(void)
1320 {
1321         HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1322         u64 payload_offset = 0;
1323         u32 payload_bytes = 0;
1324
1325         if (visorchannel_read(controlvm_channel,
1326                               offsetof(struct spar_controlvm_channel_protocol,
1327                                        request_payload_offset),
1328                               &payload_offset, sizeof(payload_offset)) < 0) {
1329                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1330                                  POSTCODE_SEVERITY_ERR);
1331                 return;
1332         }
1333         if (visorchannel_read(controlvm_channel,
1334                               offsetof(struct spar_controlvm_channel_protocol,
1335                                        request_payload_bytes),
1336                               &payload_bytes, sizeof(payload_bytes)) < 0) {
1337                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1338                                  POSTCODE_SEVERITY_ERR);
1339                 return;
1340         }
1341         initialize_controlvm_payload_info(phys_addr,
1342                                           payload_offset, payload_bytes,
1343                                           &controlvm_payload_info);
1344 }
1345
1346 /*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1347  *  Returns CONTROLVM_RESP_xxx code.
1348  */
int
visorchipset_chipset_ready(void)
{
	/* notify user space (udev) that the chipset is ready */
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
1355 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1356
1357 int
1358 visorchipset_chipset_selftest(void)
1359 {
1360         char env_selftest[20];
1361         char *envp[] = { env_selftest, NULL };
1362
1363         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1364         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1365                            envp);
1366         return CONTROLVM_RESP_SUCCESS;
1367 }
1368 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1369
1370 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1371  *  Returns CONTROLVM_RESP_xxx code.
1372  */
int
visorchipset_chipset_notready(void)
{
	/* notify user space (udev) that the chipset is going away */
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
1379 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1380
/* Handle a CHIPSET_READY controlvm message.  When the
 * visorchipset_holdchipsetready module option is set, the response is
 * deferred (header saved in g_chipset_msg_hdr) until the partition is
 * fully up; otherwise respond immediately.
 */
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	/* failures are reported back as negative response codes */
	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}
1397
1398 static void
1399 chipset_selftest(struct controlvm_message_header *msg_hdr)
1400 {
1401         int rc = visorchipset_chipset_selftest();
1402
1403         if (rc != CONTROLVM_RESP_SUCCESS)
1404                 rc = -rc;
1405         if (msg_hdr->flags.response_expected)
1406                 controlvm_respond(msg_hdr, rc);
1407 }
1408
1409 static void
1410 chipset_notready(struct controlvm_message_header *msg_hdr)
1411 {
1412         int rc = visorchipset_chipset_notready();
1413
1414         if (rc != CONTROLVM_RESP_SUCCESS)
1415                 rc = -rc;
1416         if (msg_hdr->flags.response_expected)
1417                 controlvm_respond(msg_hdr, rc);
1418 }
1419
1420 /* This is your "one-stop" shop for grabbing the next message from the
1421  * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1422  */
1423 static BOOL
1424 read_controlvm_event(struct controlvm_message *msg)
1425 {
1426         if (visorchannel_signalremove(controlvm_channel,
1427                                       CONTROLVM_QUEUE_EVENT, msg)) {
1428                 /* got a message */
1429                 if (msg->hdr.flags.test_message == 1)
1430                         return FALSE;
1431                 return TRUE;
1432         }
1433         return FALSE;
1434 }
1435
1436 /*
1437  * The general parahotplug flow works as follows.  The visorchipset
1438  * driver receives a DEVICE_CHANGESTATE message from Command
1439  * specifying a physical device to enable or disable.  The CONTROLVM
1440  * message handler calls parahotplug_process_message, which then adds
1441  * the message to a global list and kicks off a udev event which
1442  * causes a user level script to enable or disable the specified
1443  * device.  The udev script then writes to
1444  * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1445  * to get called, at which point the appropriate CONTROLVM message is
1446  * retrieved from the list and responded to.
1447  */
1448
1449 #define PARAHOTPLUG_TIMEOUT_MS 2000
1450
1451 /*
1452  * Generate unique int to match an outstanding CONTROLVM message with a
1453  * udev script /proc response
1454  */
static int
parahotplug_next_id(void)
{
	/* monotonically increasing id; atomic so concurrent message
	 * handlers never hand out duplicates
	 */
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
1462
1463 /*
1464  * Returns the time (in jiffies) when a CONTROLVM message on the list
1465  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1466  */
1467 static unsigned long
1468 parahotplug_next_expiration(void)
1469 {
1470         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1471 }
1472
1473 /*
1474  * Create a parahotplug_request, which is basically a wrapper for a
1475  * CONTROLVM_MESSAGE that we can stick on a list
1476  */
1477 static struct parahotplug_request *
1478 parahotplug_request_create(struct controlvm_message *msg)
1479 {
1480         struct parahotplug_request *req;
1481
1482         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1483         if (!req)
1484                 return NULL;
1485
1486         req->id = parahotplug_next_id();
1487         req->expiration = parahotplug_next_expiration();
1488         req->msg = *msg;
1489
1490         return req;
1491 }
1492
1493 /*
1494  * Free a parahotplug_request.
1495  */
/* Release a parahotplug_request created by parahotplug_request_create(). */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1501
1502 /*
1503  * Cause uevent to run the user level script to do the disable/enable
1504  * specified in (the CONTROLVM message in) the specified
1505  * parahotplug_request
1506  */
/* Raise a KOBJ_CHANGE uevent describing the requested PCI hot(un)plug
 * so the user-level script can enable/disable the device.  The
 * SPAR_PARAHOTPLUG_ID value lets the script's later /proc write be
 * matched back to this request.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	/* 40 bytes comfortably holds each "KEY=<int>" string below */
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	/* dev_no encodes PCI device (upper bits) and function (low 3) */
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
1531
1532 /*
1533  * Remove any request from the list that's been on there too long and
1534  * respond with an error.
1535  */
1536 static void
1537 parahotplug_process_list(void)
1538 {
1539         struct list_head *pos = NULL;
1540         struct list_head *tmp = NULL;
1541
1542         spin_lock(&parahotplug_request_list_lock);
1543
1544         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1545                 struct parahotplug_request *req =
1546                     list_entry(pos, struct parahotplug_request, list);
1547
1548                 if (!time_after_eq(jiffies, req->expiration))
1549                         continue;
1550
1551                 list_del(pos);
1552                 if (req->msg.hdr.flags.response_expected)
1553                         controlvm_respond_physdev_changestate(
1554                                 &req->msg.hdr,
1555                                 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1556                                 req->msg.cmd.device_change_state.state);
1557                 parahotplug_request_destroy(req);
1558         }
1559
1560         spin_unlock(&parahotplug_request_list_lock);
1561 }
1562
1563 /*
1564  * Called from the /proc handler, which means the user script has
1565  * finished the enable/disable.  Find the matching identifier, and
1566  * respond to the CONTROLVM message with success.
1567  */
1568 static int
1569 parahotplug_request_complete(int id, u16 active)
1570 {
1571         struct list_head *pos = NULL;
1572         struct list_head *tmp = NULL;
1573
1574         spin_lock(&parahotplug_request_list_lock);
1575
1576         /* Look for a request matching "id". */
1577         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1578                 struct parahotplug_request *req =
1579                     list_entry(pos, struct parahotplug_request, list);
1580                 if (req->id == id) {
1581                         /* Found a match.  Remove it from the list and
1582                          * respond.
1583                          */
1584                         list_del(pos);
1585                         spin_unlock(&parahotplug_request_list_lock);
1586                         req->msg.cmd.device_change_state.state.active = active;
1587                         if (req->msg.hdr.flags.response_expected)
1588                                 controlvm_respond_physdev_changestate(
1589                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1590                                         req->msg.cmd.device_change_state.state);
1591                         parahotplug_request_destroy(req);
1592                         return 0;
1593                 }
1594         }
1595
1596         spin_unlock(&parahotplug_request_list_lock);
1597         return -1;
1598 }
1599
1600 /*
1601  * Enables or disables a PCI device by kicking off a udev script
1602  */
1603 static void
1604 parahotplug_process_message(struct controlvm_message *inmsg)
1605 {
1606         struct parahotplug_request *req;
1607
1608         req = parahotplug_request_create(inmsg);
1609
1610         if (!req)
1611                 return;
1612
1613         if (inmsg->cmd.device_change_state.state.active) {
1614                 /* For enable messages, just respond with success
1615                 * right away.  This is a bit of a hack, but there are
1616                 * issues with the early enable messages we get (with
1617                 * either the udev script not detecting that the device
1618                 * is up, or not getting called at all).  Fortunately
1619                 * the messages that get lost don't matter anyway, as
1620                 * devices are automatically enabled at
1621                 * initialization.
1622                 */
1623                 parahotplug_request_kickoff(req);
1624                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1625                         CONTROLVM_RESP_SUCCESS,
1626                         inmsg->cmd.device_change_state.state);
1627                 parahotplug_request_destroy(req);
1628         } else {
1629                 /* For disable messages, add the request to the
1630                 * request list before kicking off the udev script.  It
1631                 * won't get responded to until the script has
1632                 * indicated it's done.
1633                 */
1634                 spin_lock(&parahotplug_request_list_lock);
1635                 list_add_tail(&req->list, &parahotplug_request_list);
1636                 spin_unlock(&parahotplug_request_list_lock);
1637
1638                 parahotplug_request_kickoff(req);
1639         }
1640 }
1641
/* Process a controlvm message.
 * Return result:
 *    FALSE - this function will return FALSE only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    TRUE  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static BOOL
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr = 0;
	u32 parm_bytes = 0;
	struct parser_context *parser_ctx = NULL;
	bool local_addr = false;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return TRUE;	/* no channel: nothing to do, but "done" */
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr != 0 && parm_bytes != 0) {
		BOOL retry = FALSE;

		/* retry==TRUE with a NULL context means the parser could
		 * not get memory right now; ask caller to re-present this
		 * same message later (the FALSE return documented above).
		 */
		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return FALSE;
	}

	/* For non-test messages, ACK receipt on the ACK queue before
	 * dispatching the command itself.
	 */
	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	/* dispatch on message id */
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_diag_msg_hdr = inmsg.hdr;
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		/* unknown id: report failure if the sender wants a reply */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	/* release any payload-parsing context created above */
	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return TRUE;
}
1750
1751 static HOSTADDRESS controlvm_get_channel_address(void)
1752 {
1753         u64 addr = 0;
1754         u32 size = 0;
1755
1756         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1757                 return 0;
1758
1759         return addr;
1760 }
1761
1762 static void
1763 controlvm_periodic_work(struct work_struct *work)
1764 {
1765         struct controlvm_message inmsg;
1766         BOOL got_command = FALSE;
1767         BOOL handle_command_failed = FALSE;
1768         static u64 poll_count;
1769
1770         /* make sure visorbus server is registered for controlvm callbacks */
1771         if (visorchipset_serverregwait && !serverregistered)
1772                 goto cleanup;
1773         /* make sure visorclientbus server is regsitered for controlvm
1774          * callbacks
1775          */
1776         if (visorchipset_clientregwait && !clientregistered)
1777                 goto cleanup;
1778
1779         poll_count++;
1780         if (poll_count >= 250)
1781                 ;       /* keep going */
1782         else
1783                 goto cleanup;
1784
1785         /* Check events to determine if response to CHIPSET_READY
1786          * should be sent
1787          */
1788         if (visorchipset_holdchipsetready &&
1789             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1790                 if (check_chipset_events() == 1) {
1791                         controlvm_respond(&g_chipset_msg_hdr, 0);
1792                         clear_chipset_events();
1793                         memset(&g_chipset_msg_hdr, 0,
1794                                sizeof(struct controlvm_message_header));
1795                 }
1796         }
1797
1798         while (visorchannel_signalremove(controlvm_channel,
1799                                          CONTROLVM_QUEUE_RESPONSE,
1800                                          &inmsg))
1801                 ;
1802         if (!got_command) {
1803                 if (controlvm_pending_msg_valid) {
1804                         /* we throttled processing of a prior
1805                         * msg, so try to process it again
1806                         * rather than reading a new one
1807                         */
1808                         inmsg = controlvm_pending_msg;
1809                         controlvm_pending_msg_valid = FALSE;
1810                         got_command = true;
1811                 } else {
1812                         got_command = read_controlvm_event(&inmsg);
1813                 }
1814         }
1815
1816         handle_command_failed = FALSE;
1817         while (got_command && (!handle_command_failed)) {
1818                 most_recent_message_jiffies = jiffies;
1819                 if (handle_command(inmsg,
1820                                    visorchannel_get_physaddr
1821                                    (controlvm_channel)))
1822                         got_command = read_controlvm_event(&inmsg);
1823                 else {
1824                         /* this is a scenario where throttling
1825                         * is required, but probably NOT an
1826                         * error...; we stash the current
1827                         * controlvm msg so we will attempt to
1828                         * reprocess it on our next loop
1829                         */
1830                         handle_command_failed = TRUE;
1831                         controlvm_pending_msg = inmsg;
1832                         controlvm_pending_msg_valid = TRUE;
1833                 }
1834         }
1835
1836         /* parahotplug_worker */
1837         parahotplug_process_list();
1838
1839 cleanup:
1840
1841         if (time_after(jiffies,
1842                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1843                 /* it's been longer than MIN_IDLE_SECONDS since we
1844                 * processed our last controlvm message; slow down the
1845                 * polling
1846                 */
1847                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1848                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1849         } else {
1850                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1851                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1852         }
1853
1854         queue_delayed_work(periodic_controlvm_workqueue,
1855                            &periodic_controlvm_work, poll_jiffies);
1856 }
1857
/* Work function used (instead of controlvm_periodic_work) when booting
 * a kdump crash kernel: replays the saved bus-create and device-create
 * controlvm messages stashed in the controlvm channel so the storage
 * device needed to write the dump comes back up.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;

	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	/* NOTE(review): msg is only partially initialized here; presumably
	 * chipset_init() reads just hdr.id and init_chipset -- confirm.
	 */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* exactly CONTROLVM_CRASHMSG_MAX saved messages are expected */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:
	/* a required server is not registered yet; re-queue ourselves
	 * at the slow polling rate and try again later
	 */
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
1961
/* Notifier callback: report the outcome of a CONTROLVM_BUS_CREATE. */
static void
bus_create_response(ulong bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
}
1967
/* Notifier callback: report the outcome of a CONTROLVM_BUS_DESTROY. */
static void
bus_destroy_response(ulong bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
}
1973
/* Notifier callback: report the outcome of a CONTROLVM_DEVICE_CREATE. */
static void
device_create_response(ulong bus_no, ulong dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
}
1979
/* Notifier callback: report the outcome of a CONTROLVM_DEVICE_DESTROY. */
static void
device_destroy_response(ulong bus_no, ulong dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
}
1985
/* Exported notifier: report completion of a device pause (changestate
 * to standby) back through the controlvm channel.
 */
void
visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
1994
/* Notifier callback: report completion of a device resume (changestate
 * to running) back through the controlvm channel.
 */
static void
device_resume_response(ulong bus_no, ulong dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_running);
}
2002
2003 BOOL
2004 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2005 {
2006         void *p = findbus(&bus_info_list, bus_no);
2007
2008         if (!p)
2009                 return FALSE;
2010         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2011         return TRUE;
2012 }
2013 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2014
2015 BOOL
2016 visorchipset_set_bus_context(ulong bus_no, void *context)
2017 {
2018         struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2019
2020         if (!p)
2021                 return FALSE;
2022         p->bus_driver_context = context;
2023         return TRUE;
2024 }
2025 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2026
2027 BOOL
2028 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2029                              struct visorchipset_device_info *dev_info)
2030 {
2031         void *p = finddevice(&dev_info_list, bus_no, dev_no);
2032
2033         if (!p)
2034                 return FALSE;
2035         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2036         return TRUE;
2037 }
2038 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2039
2040 BOOL
2041 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2042 {
2043         struct visorchipset_device_info *p =
2044                         finddevice(&dev_info_list, bus_no, dev_no);
2045
2046         if (!p)
2047                 return FALSE;
2048         p->bus_driver_context = context;
2049         return TRUE;
2050 }
2051 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2052
2053 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2054  */
2055 void *
2056 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2057                          char *fn, int ln)
2058 {
2059         gfp_t gfp;
2060         void *p;
2061
2062         if (ok_to_block)
2063                 gfp = GFP_KERNEL;
2064         else
2065                 gfp = GFP_ATOMIC;
2066         /* __GFP_NORETRY means "ok to fail", meaning
2067          * kmem_cache_alloc() can return NULL, implying the caller CAN
2068          * cope with failure.  If you do NOT specify __GFP_NORETRY,
2069          * Linux will go to extreme measures to get memory for you
2070          * (like, invoke oom killer), which will probably cripple the
2071          * system.
2072          */
2073         gfp |= __GFP_NORETRY;
2074         p = kmem_cache_alloc(pool, gfp);
2075         if (!p)
2076                 return NULL;
2077
2078         atomic_inc(&visorchipset_cache_buffers_in_use);
2079         return p;
2080 }
2081
2082 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2083  */
2084 void
2085 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2086 {
2087         if (!p)
2088                 return;
2089
2090         atomic_dec(&visorchipset_cache_buffers_in_use);
2091         kmem_cache_free(pool, p);
2092 }
2093
2094 static ssize_t chipsetready_store(struct device *dev,
2095                                   struct device_attribute *attr,
2096                                   const char *buf, size_t count)
2097 {
2098         char msgtype[64];
2099
2100         if (sscanf(buf, "%63s", msgtype) != 1)
2101                 return -EINVAL;
2102
2103         if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2104                 chipset_events[0] = 1;
2105                 return count;
2106         } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2107                 chipset_events[1] = 1;
2108                 return count;
2109         }
2110         return -EINVAL;
2111 }
2112
2113 /* The parahotplug/devicedisabled interface gets called by our support script
2114  * when an SR-IOV device has been shut down. The ID is passed to the script
2115  * and then passed back when the device has been removed.
2116  */
2117 static ssize_t devicedisabled_store(struct device *dev,
2118                                     struct device_attribute *attr,
2119                                     const char *buf, size_t count)
2120 {
2121         uint id;
2122
2123         if (kstrtouint(buf, 10, &id) != 0)
2124                 return -EINVAL;
2125
2126         parahotplug_request_complete(id, 0);
2127         return count;
2128 }
2129
2130 /* The parahotplug/deviceenabled interface gets called by our support script
2131  * when an SR-IOV device has been recovered. The ID is passed to the script
2132  * and then passed back when the device has been brought back up.
2133  */
2134 static ssize_t deviceenabled_store(struct device *dev,
2135                                    struct device_attribute *attr,
2136                                    const char *buf, size_t count)
2137 {
2138         uint id;
2139
2140         if (kstrtouint(buf, 10, &id) != 0)
2141                 return -EINVAL;
2142
2143         parahotplug_request_complete(id, 1);
2144         return count;
2145 }
2146
/* Module init: locate and validate the controlvm channel, set up the
 * char-device interface, the putfile slab cache, the periodic controlvm
 * worker (or the kdump crash-device worker when booting a crash kernel),
 * and register the platform device.  Returns 0 on success, negative
 * errno / -1 on failure.
 */
static int __init
visorchipset_init(void)
{
	int rc = 0, x = 0;
	HOSTADDRESS addr;

	/* this driver only functions on Unisys s-Par platforms */
	if (!unisys_spar_platform)
		return -ENODEV;

	memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
	memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
	memset(&livedump_info, 0, sizeof(livedump_info));
	atomic_set(&livedump_info.buffers_in_use, 0);

	/* NOTE(review): testvnic mode bails out with rc = x = 0, i.e.
	 * reported as success without initializing -- confirm intended.
	 */
	if (visorchipset_testvnic) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
		rc = x;
		goto cleanup;
	}

	/* map the controlvm channel and verify its signature/version */
	addr = controlvm_get_channel_address();
	if (addr != 0) {
		controlvm_channel =
		    visorchannel_create_with_lock
		    (addr,
		     sizeof(struct spar_controlvm_channel_protocol),
		     spar_controlvm_channel_protocol_uuid);
		if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel))) {
			initialize_controlvm_payload();
		} else {
			visorchannel_destroy(controlvm_channel);
			controlvm_channel = NULL;
			return -ENODEV;
		}
	} else {
		return -ENODEV;
	}

	/* register the character-device interface */
	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	putfile_buffer_list_pool =
	    kmem_cache_create(putfile_buffer_list_pool_name,
			      sizeof(struct putfile_buffer_entry),
			      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!putfile_buffer_list_pool) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -1;
		goto cleanup;
	}
	if (!visorchipset_disable_controlvm) {
		/* if booting in a crash kernel */
		if (is_kdump_kernel())
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  setup_crash_devices_work_queue);
		else
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  controlvm_periodic_work);
		periodic_controlvm_workqueue =
		    create_singlethread_workqueue("visorchipset_controlvm");

		if (!periodic_controlvm_workqueue) {
			POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
					 DIAG_SEVERITY_ERR);
			rc = -ENOMEM;
			goto cleanup;
		}
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
		rc = queue_delayed_work(periodic_controlvm_workqueue,
					&periodic_controlvm_work, poll_jiffies);
		if (rc < 0) {
			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
					 DIAG_SEVERITY_ERR);
			goto cleanup;
		}
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -1;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
	rc = 0;
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}
2252
2253 static void
2254 visorchipset_exit(void)
2255 {
2256         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2257
2258         if (visorchipset_disable_controlvm) {
2259                 ;
2260         } else {
2261                 cancel_delayed_work(&periodic_controlvm_work);
2262                 flush_workqueue(periodic_controlvm_workqueue);
2263                 destroy_workqueue(periodic_controlvm_workqueue);
2264                 periodic_controlvm_workqueue = NULL;
2265                 destroy_controlvm_payload_info(&controlvm_payload_info);
2266         }
2267         if (putfile_buffer_list_pool) {
2268                 kmem_cache_destroy(putfile_buffer_list_pool);
2269                 putfile_buffer_list_pool = NULL;
2270         }
2271
2272         cleanup_controlvm_structures();
2273
2274         memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2275
2276         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2277
2278         memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2279
2280         visorchannel_destroy(controlvm_channel);
2281
2282         visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2283         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2284 }
2285
2286 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2287 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2288 int visorchipset_testvnic = 0;
2289
2290 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2291 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2292 int visorchipset_testvnicclient = 0;
2293
2294 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2295 MODULE_PARM_DESC(visorchipset_testmsg,
2296                  "1 to manufacture the chipset, bus, and switch messages");
2297 int visorchipset_testmsg = 0;
2298
2299 module_param_named(major, visorchipset_major, int, S_IRUGO);
2300 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2301 int visorchipset_major = 0;
2302
2303 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2304 MODULE_PARM_DESC(visorchipset_serverreqwait,
2305                  "1 to have the module wait for the visor bus to register");
2306 int visorchipset_serverregwait = 0;     /* default is off */
2307 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2308 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2309 int visorchipset_clientregwait = 1;     /* default is on */
2310 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2311 MODULE_PARM_DESC(visorchipset_testteardown,
2312                  "1 to test teardown of the chipset, bus, and switch");
2313 int visorchipset_testteardown = 0;      /* default is off */
2314 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2315                    S_IRUGO);
2316 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2317                  "1 to disable polling of controlVm channel");
2318 int visorchipset_disable_controlvm = 0; /* default is off */
2319 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2320                    int, S_IRUGO);
2321 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2322                  "1 to hold response to CHIPSET_READY");
2323 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2324                                       * response immediately */
2325 module_init(visorchipset_init);
2326 module_exit(visorchipset_exit);
2327
2328 MODULE_AUTHOR("Unisys");
2329 MODULE_LICENSE("GPL");
2330 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2331                    VERSION);
2332 MODULE_VERSION(VERSION);