3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
33 #include <linux/crash_dump.h>
35 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
37 * vnic loopback test */
38 #define TEST_VNIC_SWITCHNO 1
39 #define TEST_VNIC_BUSNO 9
41 #define MAX_NAME_SIZE 128
42 #define MAX_IP_SIZE 50
43 #define MAXOUTSTANDINGCHANNELCOMMAND 256
44 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
47 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
48 * we switch to slow polling mode. As soon as we get a controlvm
49 * message, we switch back to fast polling mode.
51 #define MIN_IDLE_SECONDS 10
52 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
53 static ulong most_recent_message_jiffies; /* when we got our last
54 * controlvm message */
63 static int serverregistered;
64 static int clientregistered;
66 #define MAX_CHIPSET_EVENTS 2
67 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
69 static struct delayed_work periodic_controlvm_work;
70 static struct workqueue_struct *periodic_controlvm_workqueue;
71 static DEFINE_SEMAPHORE(notifier_lock);
73 static struct controlvm_message_header g_diag_msg_hdr;
74 static struct controlvm_message_header g_chipset_msg_hdr;
75 static struct controlvm_message_header g_del_dump_msg_hdr;
76 static const uuid_le spar_diag_pool_channel_protocol_uuid =
77 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
78 /* 0xffffff is an invalid Bus/Device number */
79 static ulong g_diagpool_bus_no = 0xffffff;
80 static ulong g_diagpool_dev_no = 0xffffff;
81 static struct controlvm_message_packet g_devicechangestate_packet;
83 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
86 #define FOR_VISORHACKBUS(channel_type_guid) \
87 (((uuid_le_cmp(channel_type_guid,\
88 spar_vnic_channel_protocol_uuid) == 0) ||\
89 (uuid_le_cmp(channel_type_guid,\
90 spar_vhba_channel_protocol_uuid) == 0)))
91 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
93 #define is_diagpool_channel(channel_type_guid) \
94 (uuid_le_cmp(channel_type_guid,\
95 spar_diag_pool_channel_protocol_uuid) == 0)
97 static LIST_HEAD(bus_info_list);
98 static LIST_HEAD(dev_info_list);
100 static struct visorchannel *controlvm_channel;
102 /* Manages the request payload in the controlvm channel */
103 static struct controlvm_payload_info {
104 u8 __iomem *ptr; /* pointer to base address of payload pool */
105 u64 offset; /* offset from beginning of controlvm
106 * channel to beginning of payload * pool */
107 u32 bytes; /* number of bytes in payload pool */
108 } controlvm_payload_info;
110 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
111 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
113 static struct livedump_info {
114 struct controlvm_message_header dumpcapture_header;
115 struct controlvm_message_header gettextdump_header;
116 struct controlvm_message_header dumpcomplete_header;
117 BOOL gettextdump_outstanding;
120 atomic_t buffers_in_use;
124 /* The following globals are used to handle the scenario where we are unable to
125 * offload the payload from a controlvm message due to memory requirements. In
126 * this scenario, we simply stash the controlvm message, then attempt to
127 * process it again the next time controlvm_periodic_work() runs.
129 static struct controlvm_message controlvm_pending_msg;
130 static BOOL controlvm_pending_msg_valid = FALSE;
132 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
133 * TRANSMIT_FILE PutFile payloads.
135 static struct kmem_cache *putfile_buffer_list_pool;
136 static const char putfile_buffer_list_pool_name[] =
137 "controlvm_putfile_buffer_list_pool";
139 /* This identifies a data buffer that has been received via a controlvm messages
140 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
142 struct putfile_buffer_entry {
143 struct list_head next; /* putfile_buffer_entry list */
144 struct parser_context *parser_ctx; /* points to input data buffer */
147 /* List of struct putfile_request *, via next_putfile_request member.
148 * Each entry in this list identifies an outstanding TRANSMIT_FILE
151 static LIST_HEAD(putfile_request_list);
153 /* This describes a buffer and its current state of transfer (e.g., how many
154 * bytes have already been supplied as putfile data, and how many bytes are
155 * remaining) for a putfile_request.
157 struct putfile_active_buffer {
158 /* a payload from a controlvm message, containing a file data buffer */
159 struct parser_context *parser_ctx;
160 /* points within data area of parser_ctx to next byte of data */
162 /* # bytes left from <pnext> to the end of this data buffer */
163 size_t bytes_remaining;
166 #define PUTFILE_REQUEST_SIG 0x0906101302281211
167 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
168 * conversation. Structs of this type are dynamically linked into
169 * <Putfile_request_list>.
171 struct putfile_request {
172 u64 sig; /* PUTFILE_REQUEST_SIG */
174 /* header from original TransmitFile request */
175 struct controlvm_message_header controlvm_header;
176 u64 file_request_number; /* from original TransmitFile request */
178 /* link to next struct putfile_request */
179 struct list_head next_putfile_request;
181 /* most-recent sequence number supplied via a controlvm message */
182 u64 data_sequence_number;
184 /* head of putfile_buffer_entry list, which describes the data to be
185 * supplied as putfile data;
186 * - this list is added to when controlvm messages come in that supply
188 * - this list is removed from via the hotplug program that is actually
189 * consuming these buffers to write as file data */
190 struct list_head input_buffer_list;
191 spinlock_t req_list_lock; /* lock for input_buffer_list */
193 /* waiters for input_buffer_list to go non-empty */
194 wait_queue_head_t input_buffer_wq;
196 /* data not yet read within current putfile_buffer_entry */
197 struct putfile_active_buffer active_buf;
199 /* <0 = failed, 0 = in-progress, >0 = successful; */
200 /* note that this must be set with req_list_lock, and if you set <0, */
201 /* it is your responsibility to also free up all of the other objects */
202 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
203 /* before releasing the lock */
204 int completion_status;
207 static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
209 struct parahotplug_request {
210 struct list_head list;
212 unsigned long expiration;
213 struct controlvm_message msg;
216 static LIST_HEAD(parahotplug_request_list);
217 static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
218 static void parahotplug_process_list(void);
220 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
221 * CONTROLVM_REPORTEVENT.
223 static struct visorchipset_busdev_notifiers busdev_server_notifiers;
224 static struct visorchipset_busdev_notifiers busdev_client_notifiers;
226 static void bus_create_response(ulong bus_no, int response);
227 static void bus_destroy_response(ulong bus_no, int response);
228 static void device_create_response(ulong bus_no, ulong dev_no, int response);
229 static void device_destroy_response(ulong bus_no, ulong dev_no, int response);
230 static void device_resume_response(ulong bus_no, ulong dev_no, int response);
232 static struct visorchipset_busdev_responders busdev_responders = {
233 .bus_create = bus_create_response,
234 .bus_destroy = bus_destroy_response,
235 .device_create = device_create_response,
236 .device_destroy = device_destroy_response,
237 .device_pause = visorchipset_device_pause_response,
238 .device_resume = device_resume_response,
241 /* info for /dev/visorchipset */
242 static dev_t major_dev = -1; /**< indicates major num for device */
244 /* prototypes for attributes */
245 static ssize_t toolaction_show(struct device *dev,
246 struct device_attribute *attr, char *buf);
247 static ssize_t toolaction_store(struct device *dev,
248 struct device_attribute *attr,
249 const char *buf, size_t count);
250 static DEVICE_ATTR_RW(toolaction);
252 static ssize_t boottotool_show(struct device *dev,
253 struct device_attribute *attr, char *buf);
254 static ssize_t boottotool_store(struct device *dev,
255 struct device_attribute *attr, const char *buf,
257 static DEVICE_ATTR_RW(boottotool);
259 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
261 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
262 const char *buf, size_t count);
263 static DEVICE_ATTR_RW(error);
265 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
267 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
268 const char *buf, size_t count);
269 static DEVICE_ATTR_RW(textid);
271 static ssize_t remaining_steps_show(struct device *dev,
272 struct device_attribute *attr, char *buf);
273 static ssize_t remaining_steps_store(struct device *dev,
274 struct device_attribute *attr,
275 const char *buf, size_t count);
276 static DEVICE_ATTR_RW(remaining_steps);
278 static ssize_t chipsetready_store(struct device *dev,
279 struct device_attribute *attr,
280 const char *buf, size_t count);
281 static DEVICE_ATTR_WO(chipsetready);
283 static ssize_t devicedisabled_store(struct device *dev,
284 struct device_attribute *attr,
285 const char *buf, size_t count);
286 static DEVICE_ATTR_WO(devicedisabled);
288 static ssize_t deviceenabled_store(struct device *dev,
289 struct device_attribute *attr,
290 const char *buf, size_t count);
291 static DEVICE_ATTR_WO(deviceenabled);
293 static struct attribute *visorchipset_install_attrs[] = {
294 &dev_attr_toolaction.attr,
295 &dev_attr_boottotool.attr,
296 &dev_attr_error.attr,
297 &dev_attr_textid.attr,
298 &dev_attr_remaining_steps.attr,
302 static struct attribute_group visorchipset_install_group = {
304 .attrs = visorchipset_install_attrs
307 static struct attribute *visorchipset_guest_attrs[] = {
308 &dev_attr_chipsetready.attr,
312 static struct attribute_group visorchipset_guest_group = {
314 .attrs = visorchipset_guest_attrs
317 static struct attribute *visorchipset_parahotplug_attrs[] = {
318 &dev_attr_devicedisabled.attr,
319 &dev_attr_deviceenabled.attr,
323 static struct attribute_group visorchipset_parahotplug_group = {
324 .name = "parahotplug",
325 .attrs = visorchipset_parahotplug_attrs
328 static const struct attribute_group *visorchipset_dev_groups[] = {
329 &visorchipset_install_group,
330 &visorchipset_guest_group,
331 &visorchipset_parahotplug_group,
335 /* /sys/devices/platform/visorchipset */
336 static struct platform_device visorchipset_platform_device = {
337 .name = "visorchipset",
339 .dev.groups = visorchipset_dev_groups,
342 /* Function prototypes */
343 static void controlvm_respond(struct controlvm_message_header *msg_hdr,
345 static void controlvm_respond_chipset_init(
346 struct controlvm_message_header *msg_hdr, int response,
347 enum ultra_chipset_feature features);
348 static void controlvm_respond_physdev_changestate(
349 struct controlvm_message_header *msg_hdr, int response,
350 struct spar_segment_state state);
352 static ssize_t toolaction_show(struct device *dev,
353 struct device_attribute *attr,
358 visorchannel_read(controlvm_channel,
359 offsetof(struct spar_controlvm_channel_protocol,
360 tool_action), &tool_action, sizeof(u8));
361 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
364 static ssize_t toolaction_store(struct device *dev,
365 struct device_attribute *attr,
366 const char *buf, size_t count)
371 if (kstrtou8(buf, 10, &tool_action) != 0)
374 ret = visorchannel_write(controlvm_channel,
375 offsetof(struct spar_controlvm_channel_protocol,
377 &tool_action, sizeof(u8));
384 static ssize_t boottotool_show(struct device *dev,
385 struct device_attribute *attr,
388 struct efi_spar_indication efi_spar_indication;
390 visorchannel_read(controlvm_channel,
391 offsetof(struct spar_controlvm_channel_protocol,
392 efi_spar_ind), &efi_spar_indication,
393 sizeof(struct efi_spar_indication));
394 return scnprintf(buf, PAGE_SIZE, "%u\n",
395 efi_spar_indication.boot_to_tool);
398 static ssize_t boottotool_store(struct device *dev,
399 struct device_attribute *attr,
400 const char *buf, size_t count)
403 struct efi_spar_indication efi_spar_indication;
405 if (kstrtoint(buf, 10, &val) != 0)
408 efi_spar_indication.boot_to_tool = val;
409 ret = visorchannel_write(controlvm_channel,
410 offsetof(struct spar_controlvm_channel_protocol,
411 efi_spar_ind), &(efi_spar_indication),
412 sizeof(struct efi_spar_indication));
419 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
424 visorchannel_read(controlvm_channel,
425 offsetof(struct spar_controlvm_channel_protocol,
427 &error, sizeof(u32));
428 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
431 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
432 const char *buf, size_t count)
437 if (kstrtou32(buf, 10, &error) != 0)
440 ret = visorchannel_write(controlvm_channel,
441 offsetof(struct spar_controlvm_channel_protocol,
443 &error, sizeof(u32));
449 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
454 visorchannel_read(controlvm_channel,
455 offsetof(struct spar_controlvm_channel_protocol,
456 installation_text_id),
457 &text_id, sizeof(u32));
458 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
461 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
462 const char *buf, size_t count)
467 if (kstrtou32(buf, 10, &text_id) != 0)
470 ret = visorchannel_write(controlvm_channel,
471 offsetof(struct spar_controlvm_channel_protocol,
472 installation_text_id),
473 &text_id, sizeof(u32));
479 static ssize_t remaining_steps_show(struct device *dev,
480 struct device_attribute *attr, char *buf)
484 visorchannel_read(controlvm_channel,
485 offsetof(struct spar_controlvm_channel_protocol,
486 installation_remaining_steps),
487 &remaining_steps, sizeof(u16));
488 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
491 static ssize_t remaining_steps_store(struct device *dev,
492 struct device_attribute *attr,
493 const char *buf, size_t count)
498 if (kstrtou16(buf, 10, &remaining_steps) != 0)
501 ret = visorchannel_write(controlvm_channel,
502 offsetof(struct spar_controlvm_channel_protocol,
503 installation_remaining_steps),
504 &remaining_steps, sizeof(u16));
511 bus_info_clear(void *v)
513 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
518 kfree(p->description);
519 p->description = NULL;
521 p->state.created = 0;
522 memset(p, 0, sizeof(struct visorchipset_bus_info));
526 dev_info_clear(void *v)
528 struct visorchipset_device_info *p =
529 (struct visorchipset_device_info *)(v);
531 p->state.created = 0;
532 memset(p, 0, sizeof(struct visorchipset_device_info));
536 check_chipset_events(void)
540 /* Check events to determine if response should be sent */
541 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
542 send_msg &= chipset_events[i];
547 clear_chipset_events(void)
550 /* Clear chipset_events */
551 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
552 chipset_events[i] = 0;
556 visorchipset_register_busdev_server(
557 struct visorchipset_busdev_notifiers *notifiers,
558 struct visorchipset_busdev_responders *responders,
559 struct ultra_vbus_deviceinfo *driver_info)
561 down(¬ifier_lock);
563 memset(&busdev_server_notifiers, 0,
564 sizeof(busdev_server_notifiers));
565 serverregistered = 0; /* clear flag */
567 busdev_server_notifiers = *notifiers;
568 serverregistered = 1; /* set flag */
571 *responders = busdev_responders;
573 bus_device_info_init(driver_info, "chipset", "visorchipset",
578 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
581 visorchipset_register_busdev_client(
582 struct visorchipset_busdev_notifiers *notifiers,
583 struct visorchipset_busdev_responders *responders,
584 struct ultra_vbus_deviceinfo *driver_info)
586 down(¬ifier_lock);
588 memset(&busdev_client_notifiers, 0,
589 sizeof(busdev_client_notifiers));
590 clientregistered = 0; /* clear flag */
592 busdev_client_notifiers = *notifiers;
593 clientregistered = 1; /* set flag */
596 *responders = busdev_responders;
598 bus_device_info_init(driver_info, "chipset(bolts)",
599 "visorchipset", VERSION, NULL);
602 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
605 cleanup_controlvm_structures(void)
607 struct visorchipset_bus_info *bi, *tmp_bi;
608 struct visorchipset_device_info *di, *tmp_di;
610 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
612 list_del(&bi->entry);
616 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
618 list_del(&di->entry);
624 chipset_init(struct controlvm_message *inmsg)
626 static int chipset_inited;
627 enum ultra_chipset_feature features = 0;
628 int rc = CONTROLVM_RESP_SUCCESS;
630 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
631 if (chipset_inited) {
632 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
636 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
638 /* Set features to indicate we support parahotplug (if Command
639 * also supports it). */
641 inmsg->cmd.init_chipset.
642 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
644 /* Set the "reply" bit so Command knows this is a
645 * features-aware driver. */
646 features |= ULTRA_CHIPSET_FEATURE_REPLY;
650 cleanup_controlvm_structures();
651 if (inmsg->hdr.flags.response_expected)
652 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
656 controlvm_init_response(struct controlvm_message *msg,
657 struct controlvm_message_header *msg_hdr, int response)
659 memset(msg, 0, sizeof(struct controlvm_message));
660 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
661 msg->hdr.payload_bytes = 0;
662 msg->hdr.payload_vm_offset = 0;
663 msg->hdr.payload_max_bytes = 0;
665 msg->hdr.flags.failed = 1;
666 msg->hdr.completion_status = (u32) (-response);
671 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
673 struct controlvm_message outmsg;
675 controlvm_init_response(&outmsg, msg_hdr, response);
676 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
677 * back the deviceChangeState structure in the packet. */
678 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
679 g_devicechangestate_packet.device_change_state.bus_no ==
681 g_devicechangestate_packet.device_change_state.dev_no ==
683 outmsg.cmd = g_devicechangestate_packet;
684 if (outmsg.hdr.flags.test_message == 1)
687 if (!visorchannel_signalinsert(controlvm_channel,
688 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
694 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
696 enum ultra_chipset_feature features)
698 struct controlvm_message outmsg;
700 controlvm_init_response(&outmsg, msg_hdr, response);
701 outmsg.cmd.init_chipset.features = features;
702 if (!visorchannel_signalinsert(controlvm_channel,
703 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
708 static void controlvm_respond_physdev_changestate(
709 struct controlvm_message_header *msg_hdr, int response,
710 struct spar_segment_state state)
712 struct controlvm_message outmsg;
714 controlvm_init_response(&outmsg, msg_hdr, response);
715 outmsg.cmd.device_change_state.state = state;
716 outmsg.cmd.device_change_state.flags.phys_device = 1;
717 if (!visorchannel_signalinsert(controlvm_channel,
718 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
724 visorchipset_save_message(struct controlvm_message *msg,
725 enum crash_obj_type type)
727 u32 crash_msg_offset;
730 /* get saved message count */
731 if (visorchannel_read(controlvm_channel,
732 offsetof(struct spar_controlvm_channel_protocol,
733 saved_crash_message_count),
734 &crash_msg_count, sizeof(u16)) < 0) {
735 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
736 POSTCODE_SEVERITY_ERR);
740 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
741 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
743 POSTCODE_SEVERITY_ERR);
747 /* get saved crash message offset */
748 if (visorchannel_read(controlvm_channel,
749 offsetof(struct spar_controlvm_channel_protocol,
750 saved_crash_message_offset),
751 &crash_msg_offset, sizeof(u32)) < 0) {
752 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
753 POSTCODE_SEVERITY_ERR);
757 if (type == CRASH_BUS) {
758 if (visorchannel_write(controlvm_channel,
761 sizeof(struct controlvm_message)) < 0) {
762 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
763 POSTCODE_SEVERITY_ERR);
767 if (visorchannel_write(controlvm_channel,
769 sizeof(struct controlvm_message), msg,
770 sizeof(struct controlvm_message)) < 0) {
771 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
772 POSTCODE_SEVERITY_ERR);
777 EXPORT_SYMBOL_GPL(visorchipset_save_message);
780 bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
782 struct visorchipset_bus_info *p = NULL;
783 BOOL need_clear = FALSE;
785 p = findbus(&bus_info_list, bus_no);
790 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
791 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
792 /* undo the row we just created... */
793 delbusdevices(&dev_info_list, bus_no);
795 if (cmd_id == CONTROLVM_BUS_CREATE)
796 p->state.created = 1;
797 if (cmd_id == CONTROLVM_BUS_DESTROY)
801 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
802 return; /* no controlvm response needed */
803 if (p->pending_msg_hdr.id != (u32)cmd_id)
805 controlvm_respond(&p->pending_msg_hdr, response);
806 p->pending_msg_hdr.id = CONTROLVM_INVALID;
809 delbusdevices(&dev_info_list, bus_no);
814 device_changestate_responder(enum controlvm_id cmd_id,
815 ulong bus_no, ulong dev_no, int response,
816 struct spar_segment_state response_state)
818 struct visorchipset_device_info *p = NULL;
819 struct controlvm_message outmsg;
821 p = finddevice(&dev_info_list, bus_no, dev_no);
824 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
825 return; /* no controlvm response needed */
826 if (p->pending_msg_hdr.id != cmd_id)
829 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
831 outmsg.cmd.device_change_state.bus_no = bus_no;
832 outmsg.cmd.device_change_state.dev_no = dev_no;
833 outmsg.cmd.device_change_state.state = response_state;
835 if (!visorchannel_signalinsert(controlvm_channel,
836 CONTROLVM_QUEUE_REQUEST, &outmsg))
839 p->pending_msg_hdr.id = CONTROLVM_INVALID;
843 device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
846 struct visorchipset_device_info *p = NULL;
847 BOOL need_clear = FALSE;
849 p = finddevice(&dev_info_list, bus_no, dev_no);
853 if (cmd_id == CONTROLVM_DEVICE_CREATE)
854 p->state.created = 1;
855 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
859 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
860 return; /* no controlvm response needed */
862 if (p->pending_msg_hdr.id != (u32)cmd_id)
865 controlvm_respond(&p->pending_msg_hdr, response);
866 p->pending_msg_hdr.id = CONTROLVM_INVALID;
872 bus_epilog(u32 bus_no,
873 u32 cmd, struct controlvm_message_header *msg_hdr,
874 int response, BOOL need_response)
876 BOOL notified = FALSE;
878 struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
885 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
886 sizeof(struct controlvm_message_header));
888 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
891 down(¬ifier_lock);
892 if (response == CONTROLVM_RESP_SUCCESS) {
894 case CONTROLVM_BUS_CREATE:
895 /* We can't tell from the bus_create
896 * information which of our 2 bus flavors the
897 * devices on this bus will ultimately end up.
898 * FORTUNATELY, it turns out it is harmless to
899 * send the bus_create to both of them. We can
900 * narrow things down a little bit, though,
901 * because we know: - BusDev_Server can handle
902 * either server or client devices
903 * - BusDev_Client can handle ONLY client
905 if (busdev_server_notifiers.bus_create) {
906 (*busdev_server_notifiers.bus_create) (bus_no);
909 if ((!bus_info->flags.server) /*client */ &&
910 busdev_client_notifiers.bus_create) {
911 (*busdev_client_notifiers.bus_create) (bus_no);
915 case CONTROLVM_BUS_DESTROY:
916 if (busdev_server_notifiers.bus_destroy) {
917 (*busdev_server_notifiers.bus_destroy) (bus_no);
920 if ((!bus_info->flags.server) /*client */ &&
921 busdev_client_notifiers.bus_destroy) {
922 (*busdev_client_notifiers.bus_destroy) (bus_no);
929 /* The callback function just called above is responsible
930 * for calling the appropriate visorchipset_busdev_responders
931 * function, which will call bus_responder()
935 bus_responder(cmd, bus_no, response);
940 device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
941 struct controlvm_message_header *msg_hdr, int response,
942 BOOL need_response, BOOL for_visorbus)
944 struct visorchipset_busdev_notifiers *notifiers = NULL;
945 BOOL notified = FALSE;
947 struct visorchipset_device_info *dev_info =
948 finddevice(&dev_info_list, bus_no, dev_no);
950 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
958 notifiers = &busdev_server_notifiers;
960 notifiers = &busdev_client_notifiers;
962 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
963 sizeof(struct controlvm_message_header));
965 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
968 down(¬ifier_lock);
971 case CONTROLVM_DEVICE_CREATE:
972 if (notifiers->device_create) {
973 (*notifiers->device_create) (bus_no, dev_no);
977 case CONTROLVM_DEVICE_CHANGESTATE:
978 /* ServerReady / ServerRunning / SegmentStateRunning */
979 if (state.alive == segment_state_running.alive &&
981 segment_state_running.operating) {
982 if (notifiers->device_resume) {
983 (*notifiers->device_resume) (bus_no,
988 /* ServerNotReady / ServerLost / SegmentStateStandby */
989 else if (state.alive == segment_state_standby.alive &&
991 segment_state_standby.operating) {
992 /* technically this is standby case
993 * where server is lost
995 if (notifiers->device_pause) {
996 (*notifiers->device_pause) (bus_no,
1000 } else if (state.alive == segment_state_paused.alive &&
1002 segment_state_paused.operating) {
1003 /* this is lite pause where channel is
1004 * still valid just 'pause' of it
1006 if (bus_no == g_diagpool_bus_no &&
1007 dev_no == g_diagpool_dev_no) {
1008 /* this will trigger the
1009 * diag_shutdown.sh script in
1010 * the visorchipset hotplug */
1012 (&visorchipset_platform_device.dev.
1013 kobj, KOBJ_ONLINE, envp);
1017 case CONTROLVM_DEVICE_DESTROY:
1018 if (notifiers->device_destroy) {
1019 (*notifiers->device_destroy) (bus_no, dev_no);
1026 /* The callback function just called above is responsible
1027 * for calling the appropriate visorchipset_busdev_responders
1028 * function, which will call device_responder()
1032 device_responder(cmd, bus_no, dev_no, response);
1037 bus_create(struct controlvm_message *inmsg)
1039 struct controlvm_message_packet *cmd = &inmsg->cmd;
1040 ulong bus_no = cmd->create_bus.bus_no;
1041 int rc = CONTROLVM_RESP_SUCCESS;
1042 struct visorchipset_bus_info *bus_info = NULL;
1044 bus_info = findbus(&bus_info_list, bus_no);
1045 if (bus_info && (bus_info->state.created == 1)) {
1046 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1047 POSTCODE_SEVERITY_ERR);
1048 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1051 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1053 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1054 POSTCODE_SEVERITY_ERR);
1055 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1059 INIT_LIST_HEAD(&bus_info->entry);
1060 bus_info->bus_no = bus_no;
1061 bus_info->dev_no = cmd->create_bus.dev_count;
1063 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
1065 if (inmsg->hdr.flags.test_message == 1)
1066 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1068 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1070 bus_info->flags.server = inmsg->hdr.flags.server;
1071 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1072 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1073 bus_info->chan_info.channel_type_uuid =
1074 cmd->create_bus.bus_data_type_uuid;
1075 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1077 list_add(&bus_info->entry, &bus_info_list);
1079 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1082 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1083 rc, inmsg->hdr.flags.response_expected == 1);
1087 bus_destroy(struct controlvm_message *inmsg)
1089 struct controlvm_message_packet *cmd = &inmsg->cmd;
1090 ulong bus_no = cmd->destroy_bus.bus_no;
1091 struct visorchipset_bus_info *bus_info;
1092 int rc = CONTROLVM_RESP_SUCCESS;
1094 bus_info = findbus(&bus_info_list, bus_no);
1096 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1097 else if (bus_info->state.created == 0)
1098 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1100 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1101 rc, inmsg->hdr.flags.response_expected == 1);
1105 bus_configure(struct controlvm_message *inmsg,
1106 struct parser_context *parser_ctx)
1108 struct controlvm_message_packet *cmd = &inmsg->cmd;
1109 ulong bus_no = cmd->configure_bus.bus_no;
1110 struct visorchipset_bus_info *bus_info = NULL;
1111 int rc = CONTROLVM_RESP_SUCCESS;
1114 bus_no = cmd->configure_bus.bus_no;
1115 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1116 POSTCODE_SEVERITY_INFO);
1118 bus_info = findbus(&bus_info_list, bus_no);
1120 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1121 POSTCODE_SEVERITY_ERR);
1122 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1123 } else if (bus_info->state.created == 0) {
1124 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1125 POSTCODE_SEVERITY_ERR);
1126 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1127 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1128 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1129 POSTCODE_SEVERITY_ERR);
1130 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1132 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1133 bus_info->partition_uuid = parser_id_get(parser_ctx);
1134 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1135 bus_info->name = parser_string_get(parser_ctx);
1137 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1138 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1139 POSTCODE_SEVERITY_INFO);
1141 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1142 rc, inmsg->hdr.flags.response_expected == 1);
/*
 * Handle a CONTROLVM_DEVICE_CREATE message: validate the parent bus,
 * record the new device in dev_info_list, and hand off to
 * device_epilog() so the bus driver is notified and a response is sent.
 *
 * NOTE(review): this extraction is missing source lines (the "static
 * void" line, error-path gotos/labels, some if-conditions and closing
 * braces); comments describe only what is visible.
 */
my_device_create(struct controlvm_message *inmsg)
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->create_device.bus_no;
	ulong dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info = NULL;
	struct visorchipset_bus_info *bus_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;	/* CONTROLVM_RESP_xxx; negated on error */

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	/* creating a device that already exists is an error */
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	/* the parent bus must exist and have been created */
	bus_info = findbus(&bus_info_list, bus_no);
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	/* NOTE(review): an allocation-failure check presumably precedes
	 * this POSTCODE -- the condition line was dropped; confirm
	 */
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* test messages carry channel addresses in our own OS memory */
	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* get the bus and devNo for DiagPool channel */
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;

	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1,
		      FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
/*
 * Handle a CONTROLVM_DEVICE_CHANGESTATE message for a virtual device:
 * validate the device and forward the requested segment state to
 * device_epilog().
 *
 * NOTE(review): the "!dev_info" condition line and closing braces are
 * missing from this extraction.
 */
my_device_changestate(struct controlvm_message *inmsg)
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->device_change_state.bus_no;
	ulong dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;

	/* only notify the bus driver when validation succeeded */
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      dev_info->chan_info.channel_type_uuid));
/*
 * Handle a CONTROLVM_DEVICE_DESTROY message: validate the device and
 * notify the bus driver via device_epilog().
 *
 * NOTE(review): the "if (!dev_info)" condition line was dropped by this
 * extraction, just before the DEVICE_INVALID assignment.
 */
my_device_destroy(struct controlvm_message *inmsg)
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->destroy_device.bus_no;
	ulong dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	/* only notify the bus driver when validation succeeded */
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      dev_info->chan_info.channel_type_uuid));
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns TRUE for success or FALSE
 * for failure.
 *
 * NOTE(review): return-type line, braces and cleanup labels are missing
 * from this extraction.
 */
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
				  struct controlvm_payload_info *info)
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	/* NOTE(review): presumably guarded by "if (!info)" -- line dropped */
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
	memset(info, 0, sizeof(struct controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

	/* map the payload area of the channel into kernel address space */
	payload = ioremap_cache(phys_addr + offset, bytes);
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;
/* Tear down a controlvm_payload_info set up by
 * initialize_controlvm_payload_info() and zero the struct.
 * NOTE(review): the iounmap of info->ptr appears to be in lines dropped
 * from this extraction -- confirm against the full source.
 */
destroy_controlvm_payload_info(struct controlvm_payload_info *info)
	memset(info, 0, sizeof(struct controlvm_payload_info));
/* Read the request-payload offset and size out of the controlvm channel
 * header, then map that payload area via
 * initialize_controlvm_payload_info().  Errors are reported through
 * POSTCODEs (the early-return lines were dropped by this extraction).
 */
initialize_controlvm_payload(void)
	HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
visorchipset_chipset_ready(void)
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
/* Fire a udev change event announcing a chipset selftest
 * (env var SPARSP_SELFTEST=1).  Returns CONTROLVM_RESP_xxx code.
 */
visorchipset_chipset_selftest(void)
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	/* "SPARSP_SELFTEST=1" (17 chars + NUL) fits in env_selftest[20] */
	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
	return CONTROLVM_RESP_SUCCESS;
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
visorchipset_chipset_notready(void)
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
/* Handle a CHIPSET_READY controlvm message.  If the holdchipsetready
 * module param is set, the response is deferred: the header is stashed
 * in g_chipset_msg_hdr and answered later by controlvm_periodic_work()
 * once both chipset_events have fired (see chipsetready_store()).
 */
chipset_ready(struct controlvm_message_header *msg_hdr)
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
/* Handle a CHIPSET_SELFTEST controlvm message: fire the selftest uevent
 * and, if the sender expects one, send the response.
 * NOTE(review): the statement under the failure check was dropped by
 * this extraction.
 */
chipset_selftest(struct controlvm_message_header *msg_hdr)
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
/* Handle a CHIPSET_STOP controlvm message: fire the offline uevent and,
 * if the sender expects one, send the response.
 * NOTE(review): the statement under the failure check was dropped by
 * this extraction.
 */
chipset_notready(struct controlvm_message_header *msg_hdr)
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.  Returns
 * whether a message was obtained (return lines dropped by this
 * extraction).
 */
read_controlvm_event(struct controlvm_message *msg)
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
/*
 * Parahotplug support: SR-IOV physical device enable/disable.
 *
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */

/* how long a CONTROLVM message may sit on the request list before
 * parahotplug_process_list() times it out with an error response
 */
#define PARAHOTPLUG_TIMEOUT_MS 2000
/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
parahotplug_next_id(void)
	/* monotonically increasing; atomic so concurrent callers never
	 * receive the same id
	 */
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list.  NOTE(review): the
 * NULL-check after kmalloc and the msg copy/return lines were dropped
 * by this extraction.
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
	struct parahotplug_request *req;

	/* __GFP_NORETRY: allocation is allowed to fail; caller handles NULL */
	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
/*
 * Free a parahotplug_request.
 */
parahotplug_request_destroy(struct parahotplug_request *req)
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request.  NOTE(review): the env_func declaration and the
 * envp array braces were dropped by this extraction.
 */
parahotplug_request_kickoff(struct parahotplug_request *req)
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	/* dev_no packs PCI device (bits 3+) and function (low 3 bits) */
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
parahotplug_process_list(void)
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		/* not yet expired -- leave it on the list (continue line
		 * dropped by extraction)
		 */
		if (!time_after_eq(jiffies, req->expiration))

		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);

	spin_unlock(&parahotplug_request_list_lock);
/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
parahotplug_request_complete(int id, u16 active)
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond (the list_del line was dropped by this
			 * extraction); the lock is released before
			 * responding.
			 */
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);

	spin_unlock(&parahotplug_request_list_lock);
/*
 * Enables or disables a PCI device by kicking off a udev script.
 * NOTE(review): the NULL-check after request_create and the "} else {"
 * separating the enable/disable paths were dropped by this extraction.
 */
parahotplug_process_message(struct controlvm_message *inmsg)
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
				CONTROLVM_RESP_SUCCESS,
				inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
/* Process a controlvm message.
 * Returns:
 * FALSE - this function will return FALSE only in the case where the
 *	   controlvm message was NOT processed, but processing must be
 *	   retried before reading the next controlvm message; a
 *	   scenario where this can occur is when we need to throttle
 *	   the allocation of memory in which to copy out controlvm
 *	   payload data.
 * TRUE  - processing of the controlvm message completed,
 *	   either successfully or with an error.
 *
 * NOTE(review): this extraction has dropped lines throughout -- notably
 * the parm_addr/parm_bytes declarations, the `break;` after each switch
 * case, the `default:` label, and several braces.  Do not read the
 * adjacent case labels as intentional fallthrough.
 */
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	struct parser_context *parser_ctx = NULL;
	bool local_addr = false;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr != 0 && parm_bytes != 0) {
		parser_init_byte_stream(parm_addr, parm_bytes,
					local_addr, &retry);
		/* retry means memory is currently scarce; report
		 * "not processed" so the caller retries later
		 */
		if (!parser_ctx && retry)

	/* ACK the message immediately, before dispatching it */
	controlvm_init_response(&ackmsg, &inmsg.hdr,
				CONTROLVM_RESP_SUCCESS);
	if (controlvm_channel)
		visorchannel_signalinsert(controlvm_channel,
					  CONTROLVM_QUEUE_ACK,

	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
	case CONTROLVM_BUS_CREATE:
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices are routed through the parahotplug
		 * udev mechanism; virtual devices are handled directly
		 */
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_diag_msg_hdr = inmsg.hdr;
			g_devicechangestate_packet = inmsg.cmd;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		/* unknown message id (default: label dropped by extraction) */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);

		parser_done(parser_ctx);
/* Ask the hypervisor (via VMCALL) for the physical address of the
 * controlvm channel.  NOTE(review): the addr/size locals and the return
 * statements are in lines dropped by this extraction.
 */
static HOSTADDRESS controlvm_get_channel_address(void)
	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
/* Workqueue function: polls the controlvm channel, dispatches incoming
 * events through handle_command(), handles the deferred CHIPSET_READY
 * response, runs the parahotplug timeout sweep, then re-queues itself
 * at poll_jiffies (fast while messages are flowing, slow after
 * MIN_IDLE_SECONDS of quiet).
 *
 * NOTE(review): this extraction is missing lines (gotos, braces, and an
 * if/else around the pending-message handling); comments describe only
 * what is visible.
 */
controlvm_periodic_work(struct work_struct *work)
	struct controlvm_message inmsg;
	BOOL got_command = FALSE;
	BOOL handle_command_failed = FALSE;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)

	if (poll_count >= 250)

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));

	/* drain everything currently on the RESPONSE queue */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,

	if (controlvm_pending_msg_valid) {
		/* we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = controlvm_pending_msg;
		controlvm_pending_msg_valid = FALSE;
		got_command = read_controlvm_event(&inmsg);

	handle_command_failed = FALSE;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = TRUE;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = TRUE;

	/* parahotplug_worker */
	parahotplug_process_list();

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;

	/* re-arm ourselves */
	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
/* One-shot work function used when booting a kdump (crash) kernel:
 * replays the CHIPSET_INIT / bus-create / device-create messages saved
 * in the controlvm channel so the dump storage device comes back up.
 *
 * NOTE(review): error-path gotos/returns after the POSTCODEs, and the
 * "} else {" lines before the NULL-failure POSTCODEs, were dropped by
 * this extraction.
 */
setup_crash_devices_work_queue(struct work_struct *work)
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
		bus_create(&local_crash_bus_msg);
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
		my_device_create(&local_crash_dev_msg);
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);

	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* cleanup path: fall back to slow polling of the channel */
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
/* Completion callback: report the CONTROLVM_BUS_CREATE result to Command. */
bus_create_response(ulong bus_no, int response)
	bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
/* Completion callback: report the CONTROLVM_BUS_DESTROY result to Command. */
bus_destroy_response(ulong bus_no, int response)
	bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
/* Completion callback: report the CONTROLVM_DEVICE_CREATE result to Command. */
device_create_response(ulong bus_no, ulong dev_no, int response)
	device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
/* Completion callback: report the CONTROLVM_DEVICE_DESTROY result to Command. */
device_destroy_response(ulong bus_no, ulong dev_no, int response)
	device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
/* Completion callback: report a device pause (transition to standby)
 * result to Command.
 */
visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
/* Completion callback: report a device resume (transition to running)
 * result to Command.
 */
device_resume_response(ulong bus_no, ulong dev_no, int response)
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_running);
/* Copy the tracked info for bus "bus_no" into *bus_info.  NOTE(review):
 * the NULL check on the lookup result and the return lines were dropped
 * by this extraction.
 */
visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
	void *p = findbus(&bus_info_list, bus_no);

	memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
/* Attach an opaque bus-driver context pointer to the tracked bus entry.
 * NOTE(review): the NULL check and return lines were dropped by this
 * extraction.
 */
visorchipset_set_bus_context(ulong bus_no, void *context)
	struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);

	p->bus_driver_context = context;
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
/* Copy the tracked info for device (bus_no, dev_no) into *dev_info.
 * NOTE(review): the NULL check on the lookup result and the return
 * lines were dropped by this extraction.
 */
visorchipset_get_device_info(ulong bus_no, ulong dev_no,
			     struct visorchipset_device_info *dev_info)
	void *p = finddevice(&dev_info_list, bus_no, dev_no);

	memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
/* Attach an opaque bus-driver context pointer to the tracked device
 * entry.  NOTE(review): the NULL check and return lines were dropped by
 * this extraction.
 */
visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
	struct visorchipset_device_info *p =
	    finddevice(&dev_info_list, bus_no, dev_no);

	p->bus_driver_context = context;
EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
/* Generic wrapper function for allocating memory from a kmem_cache pool.
 * Tracks outstanding buffers in visorchipset_cache_buffers_in_use.
 * NOTE(review): the parameter list tail, gfp setup and NULL-check lines
 * were dropped by this extraction.
 */
visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
	/* __GFP_NORETRY means "ok to fail", meaning
	 * kmem_cache_alloc() can return NULL, implying the caller CAN
	 * cope with failure.  If you do NOT specify __GFP_NORETRY,
	 * Linux will go to extreme measures to get memory for you
	 * (like, invoke oom killer), which will probably cripple the
	 * system.
	 */
	gfp |= __GFP_NORETRY;
	p = kmem_cache_alloc(pool, gfp);

	atomic_inc(&visorchipset_cache_buffers_in_use);
/* Generic wrapper function for freeing memory from a kmem_cache pool;
 * decrements the outstanding-buffer counter before returning the object
 * to the pool.  (fn/ln identify the caller for diagnostics.)
 */
visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
	atomic_dec(&visorchipset_cache_buffers_in_use);
	kmem_cache_free(pool, p);
/* sysfs store for "chipsetready": boot scripts write
 * "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED" here; each sets its flag
 * in chipset_events[], which gates the deferred CHIPSET_READY response
 * (see chipset_ready() and controlvm_periodic_work()).
 * NOTE(review): the msgtype buffer declaration and return lines were
 * dropped by this extraction.
 */
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	if (sscanf(buf, "%63s", msgtype) != 1)

	if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
		chipset_events[0] = 1;
	} else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
		chipset_events[1] = 1;
/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down.  The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
	if (kstrtouint(buf, 10, &id) != 0)

	/* active=0: complete the matching outstanding "disable" request */
	parahotplug_request_complete(id, 0);
/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered.  The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	if (kstrtouint(buf, 10, &id) != 0)

	/* active=1: complete the matching outstanding "enable" request */
	parahotplug_request_complete(id, 1);
/* Module init: zero the notifier/payload bookkeeping, attach to the
 * controlvm channel at the hypervisor-provided address, create the char
 * device and putfile kmem cache, start either the periodic controlvm
 * poll or (when kdump booting) the crash-device replay, and register
 * the platform device.
 *
 * NOTE(review): many error-path lines (rc assignments, gotos, labels,
 * local declarations) are missing from this extraction; comments
 * describe only what is visible.
 */
visorchipset_init(void)
	if (!unisys_spar_platform)

	memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
	memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
	memset(&livedump_info, 0, sizeof(livedump_info));
	atomic_set(&livedump_info.buffers_in_use, 0);

	if (visorchipset_testvnic) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);

	/* attach to the controlvm channel the hypervisor tells us about */
	addr = controlvm_get_channel_address();
		visorchannel_create_with_lock
		    sizeof(struct spar_controlvm_channel_protocol),
		    spar_controlvm_channel_protocol_uuid);
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
		visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	putfile_buffer_list_pool =
	    kmem_cache_create(putfile_buffer_list_pool_name,
			      sizeof(struct putfile_buffer_entry),
			      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!putfile_buffer_list_pool) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
	if (!visorchipset_disable_controlvm) {
		/* if booting in a crash kernel */
		if (is_kdump_kernel())
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  setup_crash_devices_work_queue);
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  controlvm_periodic_work);
		periodic_controlvm_workqueue =
		    create_singlethread_workqueue("visorchipset_controlvm");

		if (!periodic_controlvm_workqueue) {
			POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
		rc = queue_delayed_work(periodic_controlvm_workqueue,
					&periodic_controlvm_work, poll_jiffies);
			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);

	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	/* failure path postcode (label line dropped by extraction) */
	POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
			 POSTCODE_SEVERITY_ERR);
/* Module exit: stop and destroy the periodic controlvm work, unmap the
 * payload area, free the putfile cache, tear down the bookkeeping
 * structures, and destroy the channel and char device.
 */
visorchipset_exit(void)
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* NOTE(review): a "} else {" separating the disabled/enabled
	 * controlvm paths appears to have been dropped here by the
	 * extraction -- the teardown below runs when polling was enabled
	 */
	if (visorchipset_disable_controlvm) {
		cancel_delayed_work(&periodic_controlvm_work);
		flush_workqueue(periodic_controlvm_workqueue);
		destroy_workqueue(periodic_controlvm_workqueue);
		periodic_controlvm_workqueue = NULL;
		destroy_controlvm_payload_info(&controlvm_payload_info);

	if (putfile_buffer_list_pool) {
		kmem_cache_destroy(putfile_buffer_list_pool);
		putfile_buffer_list_pool = NULL;

	cleanup_controlvm_structures();

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2286 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2287 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2288 int visorchipset_testvnic = 0;
2290 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2291 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2292 int visorchipset_testvnicclient = 0;
2294 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2295 MODULE_PARM_DESC(visorchipset_testmsg,
2296 "1 to manufacture the chipset, bus, and switch messages");
2297 int visorchipset_testmsg = 0;
2299 module_param_named(major, visorchipset_major, int, S_IRUGO);
2300 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2301 int visorchipset_major = 0;
2303 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2304 MODULE_PARM_DESC(visorchipset_serverreqwait,
2305 "1 to have the module wait for the visor bus to register");
2306 int visorchipset_serverregwait = 0; /* default is off */
2307 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2308 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2309 int visorchipset_clientregwait = 1; /* default is on */
2310 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2311 MODULE_PARM_DESC(visorchipset_testteardown,
2312 "1 to test teardown of the chipset, bus, and switch");
2313 int visorchipset_testteardown = 0; /* default is off */
2314 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2316 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2317 "1 to disable polling of controlVm channel");
2318 int visorchipset_disable_controlvm = 0; /* default is off */
2319 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2321 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2322 "1 to hold response to CHIPSET_READY");
2323 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2324 * response immediately */
2325 module_init(visorchipset_init);
2326 module_exit(visorchipset_exit);
2328 MODULE_AUTHOR("Unisys");
2329 MODULE_LICENSE("GPL");
2330 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2332 MODULE_VERSION(VERSION);