/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */
#define VMBUS_PKT_TRAILER 8

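/*
 * Peek at the next complete packet in the ring without advancing the
 * shared read index.  Each ring packet is followed by an 8-byte
 * trailer (VMBUS_PKT_TRAILER), which is included in all size checks.
 */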
static struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 read_loc = ring_info->priv_read_index;
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	struct vmpacket_descriptor *cur_desc;
	u32 packetlen;
	u32 dsize = ring_info->ring_datasize;
	u32 delta = read_loc - ring_info->ring_buffer->read_index;
	u32 bytes_avail_toread = hv_get_bytes_to_read(ring_info) - delta;

	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
		return NULL;

	if ((read_loc + sizeof(*cur_desc)) > dsize)
		return NULL;

	cur_desc = ring_buffer + read_loc;
	packetlen = cur_desc->len8 << 3;

	/*
	 * If the packet under consideration is wrapping around,
	 * return failure.
	 */
	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
		return NULL;

	return cur_desc;
}

/*
 * A helper function to step through packets "in-place".
 * This API is to be called after each successful call to
 * get_next_pkt_raw().
 */
static void put_pkt_raw(struct vmbus_channel *channel,
			struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 read_loc = ring_info->priv_read_index;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = ring_info->ring_datasize;

	BUG_ON((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize);

	/*
	 * Include the packet trailer.
	 */
	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
}

/*
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
 * while ((desc = get_next_pkt_raw(channel)) != NULL) {
 *	process the packet "in-place";
 *	put_pkt_raw(channel, desc);
 * }
 * if (packets processed in place)
 *	commit_rd_index(channel);
 */
static void commit_rd_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	ring_info->ring_buffer->read_index = ring_info->priv_read_index;

	if (hv_need_to_signal_on_read(ring_info))
		vmbus_set_event(channel);
}

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

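/*
 * Allocate a netvsc_device along with the per-channel state it
 * carries: the channel callback buffer and the receive-completion
 * ring for the primary channel.
 */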
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
					 sizeof(struct recv_comp_data));

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	init_completion(&net_device->channel_init_wait);

	return net_device;
}

static void free_netvsc_device(struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->mrc[i].buf);

	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

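/*
 * Outbound and inbound lookups treat a device under destruction
 * differently: sends must stop as soon as destroy is set, while the
 * receive path keeps the device alive until all outstanding sends and
 * receives have drained.
 */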
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (!net_device)
		return NULL;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0 &&
	    atomic_read(&net_device->num_outstanding_recvs) == 0)
		net_device = NULL;

	return net_device;
}

static int netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke send buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}

	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}

	kfree(net_device->send_section_map);

	return ret;
}

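/*
 * Allocate the receive and send buffers, establish a GPADL for each,
 * and hand them to the host.  The completion replies are copied back
 * into channel_init_pkt by netvsc_send_completion() and signalled
 * through channel_init_wait.
 */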
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %d\n",
			   net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_recv_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer. */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev,
			   "unable to allocate send buffer of size %d\n",
			   net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_send_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map = kcalloc(net_device->map_words,
					       sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;

	netvsc_disconnect_vsp(device);

	net_device_ctx->nvdev = NULL;

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
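
/*
 * Transmit queues are stopped when available ring space falls below
 * RING_AVAIL_PERCENT_LOWATER and woken once it climbs back above
 * RING_AVAIL_PERCENT_HIWATER (see netvsc_send_pkt() and
 * netvsc_send_completion()).
 */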
/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

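/*
 * Handle a completion packet from the host: channel-init style
 * replies are copied back and signalled to the waiter; RNDIS send
 * completions release the send-buffer section and may wake a stopped
 * transmit queue.
 */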
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 send_index;
	struct sk_buff *skb;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		skb = (struct sk_buff *)(unsigned long)packet->trans_id;

		/* Notify the layer above us */
		if (skb) {
			nvsc_packet = (struct hv_netvsc_packet *)skb->cb;
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = incoming_channel;
			dev_kfree_skb_any(skb);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device_ctx->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
	} else {
		netdev_err(ndev,
			   "Unknown send completion packet type %d received!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i, prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~map_addr[i]) /* no zero bit in this word */
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val) /* lost the race for this bit */
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}

	return ret_val;
}

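/*
 * Copy an outgoing packet's page fragments into the claimed send
 * buffer section, padding the RNDIS message to the device's required
 * alignment when further packets may still be batched behind it.
 */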
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	bool is_data_pkt = (skb != NULL) ? true : false;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (is_data_pkt && xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

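/*
 * Hand a single packet to the host, either via page buffers or inline
 * in the VMBus message, and apply queue flow control based on the
 * remaining ring space.
 */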
static int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	u16 q_idx = packet->q_idx;
	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
	struct net_device *ndev = hv_get_drvdata(device);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		xmit_more = false;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !xmit_more);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !xmit_more);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			if (atomic_read(&net_device->
					queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

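/*
 * Main transmit entry point from the RNDIS filter.  Small packets are
 * batched into a shared send-buffer section (multi-send data, "msd")
 * and flushed either when the section fills up or when the stack
 * stops signalling xmit_more.
 */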
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];

	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->skb)
			dev_kfree_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		m_ret = netvsc_send_pkt(device, msd_send, net_device,
					NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (ret == 0 && cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

static int netvsc_send_recv_completion(struct vmbus_channel *channel,
				       u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int ret;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message_header) + sizeof(u32),
			       transaction_id, VM_PKT_COMP, 0);

	return ret;
}

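/*
 * Receive completions that cannot be sent immediately are parked in a
 * per-queue circular buffer (mrc), indexed by "first" (oldest pending
 * entry) and "next" (first free slot).
 */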
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
					u32 *filled, u32 *avail)
{
	u32 first = nvdev->mrc[q_idx].first;
	u32 next = nvdev->mrc[q_idx].next;

	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
		  next - first;

	*avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}

/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
							 *nvdev, u16 q_idx)
{
	u32 filled, avail;

	if (!nvdev->mrc[q_idx].buf)
		return NULL;

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!filled)
		return NULL;

	return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
	       sizeof(struct recv_comp_data);
}

/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
	int num_recv;

	nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
				  NETVSC_RECVSLOT_MAX;

	num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

	if (nvdev->destroy && num_recv == 0)
		wake_up(&nvdev->wait_drain);
}

/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
				 struct vmbus_channel *channel, u16 q_idx)
{
	struct recv_comp_data *rcd;
	int ret;

	while ((rcd = read_recv_comp_slot(nvdev, q_idx)) != NULL) {
		ret = netvsc_send_recv_completion(channel, rcd->tid,
						  rcd->status);
		if (ret)
			break;

		put_recv_comp_slot(nvdev, q_idx);
	}
}

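/*
 * Flush pending completions once outstanding receives exceed this
 * percentage of the host-advertised sub-allocation count.
 */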
#define NETVSC_RCD_WATERMARK 80

/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
	struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
	u32 filled, avail, next;
	struct recv_comp_data *rcd;

	if (!nvdev->recv_section)
		return NULL;

	if (!nvdev->mrc[q_idx].buf)
		return NULL;

	if (atomic_read(&nvdev->num_outstanding_recvs) >
	    nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
		netvsc_chk_recv_comp(nvdev, channel, q_idx);

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!avail)
		return NULL;

	next = nvdev->mrc[q_idx].next;
	rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
	nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;

	atomic_inc(&nvdev->num_outstanding_recvs);

	return rcd;
}

static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev = hv_get_drvdata(device);
	void *data;
	int ret;
	struct recv_comp_data *rcd;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received - %d\n",
			   nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - expecting %x got %x\n",
			   NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		data = (void *)((unsigned long)net_device->recv_buf +
				vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(device, netvsc_packet, &data,
					      channel);
	}

	if (!net_device->mrc[q_idx].buf) {
		ret = netvsc_send_recv_completion(channel,
						  vmxferpage_packet->d.trans_id,
						  status);
		if (ret)
			netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
				   q_idx, vmxferpage_packet->d.trans_id, ret);
		return;
	}

	rcd = get_recv_comp_slot(net_device, channel, q_idx);
	if (!rcd) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, vmxferpage_packet->d.trans_id);
		return;
	}

	rcd->tid = vmxferpage_packet->d.trans_id;
	rcd->status = status;
}

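/*
 * Cache the send indirection table distributed by the host via
 * NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE; the transmit path uses it to
 * map flow hashes to channels.
 */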
static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev = hv_get_drvdata(hdev);
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

static void netvsc_send_vf(struct net_device_context *net_device_ctx,
			   struct nvsp_message *nvmsg)
{
	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct net_device_context *net_device_ctx,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(net_device_ctx, nvmsg);
		break;
	}
}

static void netvsc_process_raw_pkt(struct hv_device *device,
				   struct vmbus_channel *channel,
				   struct netvsc_device *net_device,
				   struct net_device *ndev,
				   u64 request_id,
				   struct vmpacket_descriptor *desc)
{
	struct nvsp_message *nvmsg;
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	nvmsg = (struct nvsp_message *)((unsigned long)desc +
					(desc->offset8 << 3));

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(net_device, channel, device, desc);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		netvsc_receive(net_device, channel, device, desc);
		break;

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(device, net_device_ctx, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, request_id);
		break;
	}
}

void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;
	bool need_to_commit = false;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = hv_get_drvdata(device);
	buffer = get_per_channel_state(channel);

	while (1) {
		desc = get_next_pkt_raw(channel);
		if (desc != NULL) {
			netvsc_process_raw_pkt(device,
					       channel,
					       net_device,
					       ndev,
					       desc->trans_id,
					       desc);

			put_pkt_raw(channel, desc);
			need_to_commit = true;
			continue;
		}
		if (need_to_commit) {
			need_to_commit = false;
			commit_rd_index(channel);
		}

		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				netvsc_process_raw_pkt(device,
						       channel,
						       net_device,
						       ndev,
						       request_id,
						       desc);
			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}
		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size (%d)!!\n",
					   bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	}

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);

	netvsc_chk_recv_comp(net_device, channel, q_idx);
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		net_device->chn_table[i] = device->channel;

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	wmb();

	net_device_ctx->nvdev = net_device;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);

	return ret;
}