/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
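
/*
 * Table of known VMBus device classes, indexed by device type.  The
 * .perf_device flag marks performance-critical devices; init_vp_index()
 * spreads their channel interrupts across NUMA nodes and CPUs, while all
 * other channels (and all channels on pre-Win8 hosts) stay on CPU 0.
 */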
static const struct vmbus_device vmbus_devs[] = {
	{ .dev_type = HV_SCSI,
	{ .dev_type = HV_PCIE,
	/* Synthetic Frame Buffer */
	/* Synthetic Keyboard */
	{ .dev_type = HV_MOUSE,
	  .perf_device = false,
	  .perf_device = false,
	{ .dev_type = HV_SHUTDOWN,
	  .perf_device = false,
	{ .dev_type = HV_FCOPY,
	  .perf_device = false,
	{ .dev_type = HV_BACKUP,
	  .perf_device = false,
	  .perf_device = false,
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
};
static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}

static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version and fw_vercnt specify the framework version(s) that
 * we can support.
 *
 * The srv_version and srv_vercnt specify the service
 * version(s) that we can support.
 *
 * Versions are given in decreasing order.
 *
 * nego_fw_version and nego_srv_version store the selected protocol versions.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {
			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {
				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
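
/*
 * Illustrative sketch (not part of this file): a Hyper-V utility driver's
 * channel callback would typically use vmbus_prep_negotiate_resp() roughly
 * as below, with driver-specific version arrays; names such as
 * fw_versions[], sd_versions[] and sd_srv_ver are hypothetical.
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
 *		vmbus_prep_negotiate_resp(icmsghdrp, buf,
 *					  fw_versions, FW_VER_COUNT,
 *					  sd_versions, SD_VER_COUNT,
 *					  NULL, &sd_srv_ver);
 *	}
 *	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, buf, recvlen, requestid,
 *			 VM_PKT_DATA_INBAND, 0);
 */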
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);

	kfree(channel);
}
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}
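
/*
 * Pause/resume delivery of channel events.  While the callback_event
 * tasklet is disabled, per-channel interrupt work is deferred; enabling
 * it again schedules the tasklet in case events arrived in the meantime.
 */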
void hv_event_tasklet_disable(struct vmbus_channel *channel)
{
	tasklet_disable(&channel->callback_event);
}

void hv_event_tasklet_enable(struct vmbus_channel *channel)
{
	tasklet_enable(&channel->callback_event);

	/* In case there is any pending event */
	tasklet_schedule(&channel->callback_event);
}
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	hv_event_tasklet_disable(channel);
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}
	hv_event_tasklet_enable(channel);

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_device_register(newchannel->device_obj);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}
/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
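
/*
 * For example (illustrative only): on a guest with two NUMA nodes, the
 * primary channels of performance-critical devices alternate between the
 * nodes as offers arrive, and each sub-channel is then bound to a CPU in
 * the node its primary channel landed on, cycling through that node's CPUs.
 */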
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */
	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
		(ALIGN((unsigned long)
		&newchannel->sig_buf,
		HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		/*
		 * This should never happen, because in
		 * vmbus_process_offer() we have already invoked
		 * vmbus_release_relid() on error.
		 */
		goto out;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	vmbus_rescind_cleanup(channel);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			goto out;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}

out:
	mutex_unlock(&vmbus_connection.channel_mutex);
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
	{CHANNELMSG_18,				0, NULL},
	{CHANNELMSG_19,				0, NULL},
	{CHANNELMSG_20,				0, NULL},
	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
};
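
/*
 * A NULL handler marks message types the guest does not process (typically
 * messages the guest only sends to the host); if one arrives anyway,
 * vmbus_onmessage() below logs it as unhandled.
 */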
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0)
		pr_err("Unable to request offers - %d\n", ret);

	kfree(msginfo);
	return ret;
}
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
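
/*
 * Illustrative sketch (not part of this file): a storage or network driver
 * that has opened sub-channels would pick the channel for each request
 * roughly as below; out_chn, packet, len and req_id are hypothetical names.
 *
 *	struct vmbus_channel *out_chn;
 *
 *	out_chn = vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(out_chn, packet, len, req_id,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */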
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
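
/*
 * Illustrative sketch (not part of this file): a driver that wants
 * sub-channels typically registers its callback on the primary channel
 * before asking the host (via a device-specific message) to create them;
 * my_sc_open below is a hypothetical callback name.
 *
 *	vmbus_set_sc_create_callback(primary, my_sc_open);
 *	... send the device-specific "create N sub-channels" request ...
 *	if (vmbus_are_subchannels_present(primary))
 *		... sub-channels already exist and the callback has run ...
 */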
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);