/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"
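
/*
 * Allocate the per-device NetVSC state and install it as the hv_device's
 * driver data. The struct net_device pointer that was registered there
 * beforehand is stashed in net_device->ndev (see the note in
 * netvsc_device_add() below).
 */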
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);

        net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
        if (!net_device)
                return NULL;

        init_waitqueue_head(&net_device->wait_drain);
        net_device->start_remove = false;
        net_device->destroy = false;
        net_device->dev = device;
        net_device->ndev = ndev;

        hv_set_drvdata(device, net_device);
        return net_device;
}
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
        struct netvsc_device *net_device;

        net_device = hv_get_drvdata(device);
        if (net_device && net_device->destroy)
                net_device = NULL;

        return net_device;
}
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
        struct netvsc_device *net_device;

        net_device = hv_get_drvdata(device);
        if (!net_device)
                return NULL;

        /* Keep the inbound path alive until all outstanding sends drain;
         * processing incoming completion packets is what drains them.
         */
        if (net_device->destroy &&
            atomic_read(&net_device->num_outstanding_sends) == 0)
                net_device = NULL;

        return net_device;
}
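
/*
 * Tear down the receive and send buffers in the reverse order of their
 * setup in netvsc_init_buf(): revoke each buffer from the host, tear down
 * the GPADL that maps it into host address space, and only then free the
 * guest-side memory.
 */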
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
        struct nvsp_message *revoke_packet;
        int ret = 0;
        struct net_device *ndev = net_device->ndev;

        /*
         * If we got a section count, it means we received a
         * SendReceiveBufferComplete msg (i.e., sent a
         * NvspMessage1TypeSendReceiveBuffer msg), therefore we need
         * to send a revoke msg here.
         */
        if (net_device->recv_section_cnt) {
                /* Send the revoke receive buffer */
                revoke_packet = &net_device->revoke_packet;
                memset(revoke_packet, 0, sizeof(struct nvsp_message));

                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
                revoke_packet->msg.v1_msg.
                        revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

                ret = vmbus_sendpacket(net_device->dev->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
                                       VM_PKT_DATA_INBAND, 0);
                /*
                 * If we failed here, we might as well return and
                 * have a leak rather than continue and a bugchk.
                 */
                if (ret != 0) {
                        netdev_err(ndev, "unable to send "
                                   "revoke receive buffer to netvsp\n");
                        return ret;
                }
        }
        /* Teardown the gpadl on the vsp end */
        if (net_device->recv_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(net_device->dev->channel,
                                           net_device->recv_buf_gpadl_handle);

                /* If we failed here, we might as well return and have a leak
                 * rather than continue and a bugchk.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to teardown receive buffer's gpadl\n");
                        return ret;
                }
                net_device->recv_buf_gpadl_handle = 0;
        }

        if (net_device->recv_buf) {
                /* Free up the receive buffer */
                vfree(net_device->recv_buf);
                net_device->recv_buf = NULL;
        }

        if (net_device->recv_section) {
                net_device->recv_section_cnt = 0;
                kfree(net_device->recv_section);
                net_device->recv_section = NULL;
        }
        /* Deal with the send buffer we may have setup.
         * If we got a send section size, it means we received a
         * SendSendBufferComplete msg (i.e., sent a
         * NvspMessage1TypeSendSendBuffer msg), therefore we need
         * to send a revoke msg here.
         */
        if (net_device->send_section_size) {
                /* Send the revoke send buffer */
                revoke_packet = &net_device->revoke_packet;
                memset(revoke_packet, 0, sizeof(struct nvsp_message));

                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
                revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;

                ret = vmbus_sendpacket(net_device->dev->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
                                       VM_PKT_DATA_INBAND, 0);
                /* If we failed here, we might as well return and
                 * have a leak rather than continue and a bugchk.
                 */
                if (ret != 0) {
                        netdev_err(ndev, "unable to send "
                                   "revoke send buffer to netvsp\n");
                        return ret;
                }
        }
        /* Teardown the gpadl on the vsp end */
        if (net_device->send_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(net_device->dev->channel,
                                           net_device->send_buf_gpadl_handle);

                /* If we failed here, we might as well return and have a leak
                 * rather than continue and a bugchk.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to teardown send buffer's gpadl\n");
                        return ret;
                }
                net_device->send_buf_gpadl_handle = 0;
        }
        if (net_device->send_buf) {
                /* Free up the send buffer */
                free_pages((unsigned long)net_device->send_buf,
                           get_order(net_device->send_buf_size));
                net_device->send_buf = NULL;
        }
        kfree(net_device->send_section_map);

        return ret;
}
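
/*
 * Allocate the receive and send buffers, map each one into host address
 * space with a GPADL, and announce the handles to the NetVSP. Each
 * request below follows the same handshake: send an NVSP message with a
 * completion requested, then sleep on channel_init_wait, which
 * netvsc_send_completion() completes after copying the host's response
 * into channel_init_pkt.
 */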
static int netvsc_init_buf(struct hv_device *device)
{
        int ret = 0;
        unsigned long t;
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;

        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
        ndev = net_device->ndev;

        net_device->recv_buf = vzalloc(net_device->recv_buf_size);
        if (!net_device->recv_buf) {
                netdev_err(ndev, "unable to allocate receive "
                           "buffer of size %d\n", net_device->recv_buf_size);
                ret = -ENOMEM;
                goto cleanup;
        }
        /*
         * Establish the gpadl handle for this buffer on this
         * channel.  Note: This call uses the vmbus connection rather
         * than the channel to establish the gpadl handle.
         */
        ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
                                    net_device->recv_buf_size,
                                    &net_device->recv_buf_gpadl_handle);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to establish receive buffer's gpadl\n");
                goto cleanup;
        }
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;

        memset(init_packet, 0, sizeof(struct nvsp_message));

        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
        init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
                net_device->recv_buf_gpadl_handle;
        init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to send receive buffer's gpadl to netvsp\n");
                goto cleanup;
        }

        t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
        BUG_ON(t == 0);
        /* Check the response */
        if (init_packet->msg.v1_msg.
            send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "Unable to complete receive buffer "
                           "initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.
                           send_recv_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Parse the response */
        net_device->recv_section_cnt = init_packet->msg.
                v1_msg.send_recv_buf_complete.num_sections;

        net_device->recv_section = kmemdup(
                init_packet->msg.v1_msg.send_recv_buf_complete.sections,
                net_device->recv_section_cnt *
                sizeof(struct nvsp_1_receive_buffer_section),
                GFP_KERNEL);
        if (net_device->recv_section == NULL) {
                ret = -EINVAL;
                goto cleanup;
        }

        /*
         * For 1st release, there should only be 1 section that represents the
         * entire receive buffer.
         */
        if (net_device->recv_section_cnt != 1 ||
            net_device->recv_section->offset != 0) {
                ret = -EINVAL;
                goto cleanup;
        }
        /* Now setup the send buffer. */
        net_device->send_buf =
                (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                                         get_order(net_device->send_buf_size));
        if (!net_device->send_buf) {
                netdev_err(ndev, "unable to allocate send "
                           "buffer of size %d\n", net_device->send_buf_size);
                ret = -ENOMEM;
                goto cleanup;
        }
        /* Establish the gpadl handle for this buffer on this
         * channel.  Note: This call uses the vmbus connection rather
         * than the channel to establish the gpadl handle.
         */
        ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
                                    net_device->send_buf_size,
                                    &net_device->send_buf_gpadl_handle);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to establish send buffer's gpadl\n");
                goto cleanup;
        }
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
        init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
                net_device->send_buf_gpadl_handle;
        init_packet->msg.v1_msg.send_send_buf.id = 0;

        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to send send buffer's gpadl to netvsp\n");
                goto cleanup;
        }

        t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
        BUG_ON(t == 0);
        /* Check the response */
        if (init_packet->msg.v1_msg.
            send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "Unable to complete send buffer "
                           "initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.
                           send_send_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Parse the response */
        net_device->send_section_size = init_packet->msg.
                v1_msg.send_send_buf_complete.section_size;

        /* Section count is simply the size divided by the section size. */
        net_device->send_section_cnt =
                net_device->send_buf_size / net_device->send_section_size;

        dev_info(&device->device, "Send section size: %d, Section count:%d\n",
                 net_device->send_section_size, net_device->send_section_cnt);

        /* Setup state for managing the send buffer. */
        net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
                                             BITS_PER_LONG);

        net_device->send_section_map =
                kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
        if (net_device->send_section_map == NULL) {
                ret = -ENOMEM;
                goto cleanup;
        }

        goto exit;

cleanup:
        netvsc_destroy_buf(net_device);

exit:
        return ret;
}
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
                              struct netvsc_device *net_device,
                              struct nvsp_message *init_packet,
                              u32 nvsp_ver)
{
        int ret;
        unsigned long t;

        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
        init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
        init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

        /* Send the init request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                return ret;

        t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
        if (t == 0)
                return -ETIMEDOUT;

        if (init_packet->msg.init_msg.init_complete.status !=
            NVSP_STAT_SUCCESS)
                return -EINVAL;

        if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
                return 0;

        /* NVSPv2 and beyond: Send NDIS config */
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
        init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND, 0);

        return ret;
}
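
/*
 * Connect to the NetVSP: negotiate the protocol version, report the NDIS
 * version, then post the receive and send buffers. Versions are probed
 * newest-first; negotiate_nvsp_ver() offers the same version as both the
 * minimum and the maximum, so each probe either matches exactly or fails.
 */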
static int netvsc_connect_vsp(struct hv_device *device)
{
        int ret;
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        int ndis_version;
        struct net_device *ndev;
        u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
                NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
        int i;
        int num_ver = ARRAY_SIZE(ver_list);

        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
        ndev = net_device->ndev;

        init_packet = &net_device->channel_init_pkt;

        /* Negotiate the latest NVSP protocol supported */
        for (i = num_ver - 1; i >= 0; i--)
                if (negotiate_nvsp_ver(device, net_device, init_packet,
                                       ver_list[i]) == 0) {
                        net_device->nvsp_version = ver_list[i];
                        break;
                }

        if (i < 0) {
                ret = -EPROTO;
                goto cleanup;
        }

        pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

        /* Send the ndis version */
        memset(init_packet, 0, sizeof(struct nvsp_message));

        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
                ndis_version = 0x00060001;
        else
                ndis_version = 0x0006001e;

        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
        init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
                (ndis_version & 0xFFFF0000) >> 16;
        init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
                ndis_version & 0xFFFF;

        /* Send the init request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND, 0);
        if (ret != 0)
                goto cleanup;

        /* Post the big receive buffer to NetVSP */
        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
        else
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
        net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

        ret = netvsc_init_buf(device);

cleanup:
        return ret;
}
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
        netvsc_destroy_buf(net_device);
}
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
        struct netvsc_device *net_device;
        unsigned long flags;

        net_device = hv_get_drvdata(device);

        netvsc_disconnect_vsp(net_device);

        /*
         * Since we have already drained, we don't need to busy wait
         * as was done in final_release_stor_device()
         * Note that we cannot set the ext pointer to NULL until
         * we have drained - to drain the outgoing packets, we need to
         * allow incoming packets.
         */

        spin_lock_irqsave(&device->channel->inbound_lock, flags);
        hv_set_drvdata(device, NULL);
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

        /*
         * At this point, no one should be accessing net_device
         * except in here.
         */
        dev_notice(&device->device, "net device safe to remove\n");

        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

        /* Release all resources (vfree is a no-op on NULL) */
        vfree(net_device->sub_cb_buf);
        kfree(net_device);
        return 0;
}
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
                struct hv_ring_buffer_info *ring_info)
{
        u32 avail_read, avail_write;

        hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

        return avail_write * 100 / ring_info->ring_datasize;
}
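
/*
 * Worked example: with a 64 KiB ring data area and 16384 writable bytes,
 * this returns 16384 * 100 / 65536 = 25 (percent). The transmit path
 * stops a queue below RING_AVAIL_PERCENT_LOWATER, and the completion
 * path wakes it above RING_AVAIL_PERCENT_HIWATER; the gap between the
 * two watermarks keeps the queue from thrashing on every packet.
 */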
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
                                         u32 index)
{
        sync_change_bit(index, net_device->send_section_map);
}
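
/*
 * Handle a completion from the host. Responses to the init-time requests
 * are copied into channel_init_pkt and wake the sender blocked on
 * channel_init_wait; RNDIS send completions release the send-buffer slot,
 * invoke the caller's completion callback, and restart the transmit queue
 * once the ring drains past the high watermark.
 */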
static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
{
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet *nvsc_packet;
        struct net_device *ndev;
        u32 send_index;

        ndev = net_device->ndev;

        nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
                                              (packet->offset8 << 3));

        if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
            (nvsp_packet->hdr.msg_type ==
             NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
            (nvsp_packet->hdr.msg_type ==
             NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
            (nvsp_packet->hdr.msg_type ==
             NVSP_MSG5_TYPE_SUBCHANNEL)) {
                /* Copy the response back */
                memcpy(&net_device->channel_init_pkt, nvsp_packet,
                       sizeof(struct nvsp_message));
                complete(&net_device->channel_init_wait);
        } else if (nvsp_packet->hdr.msg_type ==
                   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
                int num_outstanding_sends;
                u16 q_idx = 0;
                struct vmbus_channel *channel = device->channel;
                int queue_sends;

                /* Get the send context */
                nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
                        packet->trans_id;

                /* Notify the layer above us */
                if (nvsc_packet) {
                        send_index = nvsc_packet->send_buf_index;
                        if (send_index != NETVSC_INVALID_INDEX)
                                netvsc_free_send_slot(net_device, send_index);
                        q_idx = nvsc_packet->q_idx;
                        channel = nvsc_packet->channel;
                        nvsc_packet->send_completion(nvsc_packet->
                                                     send_completion_ctx);
                }

                num_outstanding_sends =
                        atomic_dec_return(&net_device->num_outstanding_sends);
                queue_sends = atomic_dec_return(&net_device->
                                                queue_sends[q_idx]);

                if (net_device->destroy && num_outstanding_sends == 0)
                        wake_up(&net_device->wait_drain);

                if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
                    !net_device->start_remove &&
                    (hv_ringbuf_avail_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
                        netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
        } else {
                netdev_err(ndev, "Unknown send completion packet type- "
                           "%d received!!\n", nvsp_packet->hdr.msg_type);
        }
}
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
        unsigned long index;
        u32 max_words = net_device->map_words;
        unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
        u32 section_cnt = net_device->send_section_cnt;
        int ret_val = NETVSC_INVALID_INDEX;
        int i, prev_val;

        for (i = 0; i < max_words; i++) {
                if (!~(map_addr[i]))
                        continue;       /* word is full, skip */
                index = ffz(map_addr[i]);
                prev_val = sync_test_and_set_bit(index, &map_addr[i]);
                if (prev_val)           /* lost the race for this bit */
                        continue;
                if ((index + (i * BITS_PER_LONG)) >= section_cnt)
                        break;
                ret_val = (index + (i * BITS_PER_LONG));
                break;
        }
        return ret_val;
}
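
/*
 * Copy a packet's page fragments into one contiguous send-buffer section,
 * so the host can read the data without mapping the original guest pages.
 * Returns the number of bytes copied.
 */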
u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                            unsigned int section_index,
                            struct hv_netvsc_packet *packet)
{
        char *start = net_device->send_buf;
        char *dest = (start + (section_index * net_device->send_section_size));
        int i;
        u32 msg_size = 0;

        for (i = 0; i < packet->page_buf_cnt; i++) {
                char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
                u32 offset = packet->page_buf[i].offset;
                u32 len = packet->page_buf[i].len;

                memcpy(dest, (src + offset), len);
                msg_size += len;
                dest += len;
        }
        return msg_size;
}
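
/*
 * Queue one RNDIS packet to the host. A packet small enough for a
 * send-buffer section is copied into the shared buffer and its skb freed
 * immediately (the data no longer lives in guest pages); anything larger
 * is posted as a page-buffer scatter list.
 */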
int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet)
{
        struct netvsc_device *net_device;
        int ret = 0;
        struct nvsp_message sendMessage;
        struct net_device *ndev;
        struct vmbus_channel *out_channel = NULL;
        u64 req_id;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        u32 msg_size = 0;
        struct sk_buff *skb;

        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
        ndev = net_device->ndev;

        sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (packet->is_data_pkt) {
                /* 0 is RMC_DATA; */
                sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
        } else {
                /* 1 is RMC_CONTROL; */
                sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
        }

        /* Attempt to send via sendbuf */
        if (packet->total_data_buflen < net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (section_index != NETVSC_INVALID_INDEX) {
                        msg_size = netvsc_copy_to_send_buf(net_device,
                                                           section_index,
                                                           packet);
                        skb = (struct sk_buff *)
                              (unsigned long)packet->send_completion_tid;
                        if (skb)
                                dev_kfree_skb_any(skb);
                        packet->page_buf_cnt = 0;
                }
        }
        packet->send_buf_index = section_index;

        sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
                section_index;
        sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;

        if (packet->send_completion)
                req_id = (ulong)packet;
        else
                req_id = 0;

        out_channel = net_device->chn_table[packet->q_idx];
        if (out_channel == NULL)
                out_channel = device->channel;
        packet->channel = out_channel;

        if (packet->page_buf_cnt) {
                ret = vmbus_sendpacket_pagebuffer(out_channel,
                                                  packet->page_buf,
                                                  packet->page_buf_cnt,
                                                  &sendMessage,
                                                  sizeof(struct nvsp_message),
                                                  req_id);
        } else {
                ret = vmbus_sendpacket(out_channel, &sendMessage,
                                       sizeof(struct nvsp_message),
                                       req_id,
                                       VM_PKT_DATA_INBAND,
                                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }

        if (ret == 0) {
                atomic_inc(&net_device->num_outstanding_sends);
                atomic_inc(&net_device->queue_sends[packet->q_idx]);

                if (hv_ringbuf_avail_percent(&out_channel->outbound) <
                    RING_AVAIL_PERCENT_LOWATER) {
                        netif_tx_stop_queue(netdev_get_tx_queue(
                                            ndev, packet->q_idx));

                        if (atomic_read(&net_device->
                                        queue_sends[packet->q_idx]) < 1)
                                netif_tx_wake_queue(netdev_get_tx_queue(
                                                    ndev, packet->q_idx));
                }
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(netdev_get_tx_queue(
                                    ndev, packet->q_idx));
                if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
                        netif_tx_wake_queue(netdev_get_tx_queue(
                                            ndev, packet->q_idx));
                        ret = -ENOSPC;
                }
        } else {
                netdev_err(ndev, "Unable to send packet %p ret %d\n",
                           packet, ret);
        }

        return ret;
}
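
/*
 * Acknowledge a received RNDIS packet so the host can recycle that range
 * of the receive buffer. On a full ring (-EAGAIN) the completion is
 * retried a few times, since the host cannot reuse the range until the
 * acknowledgment arrives.
 */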
static void netvsc_send_recv_completion(struct hv_device *device,
                                        struct vmbus_channel *channel,
                                        struct netvsc_device *net_device,
                                        u64 transaction_id, u32 status)
{
        struct nvsp_message recvcompMessage;
        int retries = 0;
        int ret;
        struct net_device *ndev;

        ndev = net_device->ndev;

        recvcompMessage.hdr.msg_type =
                NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

        recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
        /* Send the completion */
        ret = vmbus_sendpacket(channel, &recvcompMessage,
                               sizeof(struct nvsp_message), transaction_id,
                               VM_PKT_COMP, 0);
        if (ret == -EAGAIN) {
                /* no more room... wait a bit and retry up to 3 times */
                retries++;
                netdev_err(ndev, "unable to send receive completion pkt"
                           " (tid %llx)...retrying %d\n", transaction_id,
                           retries);

                if (retries < 4) {
                        udelay(100);
                        goto retry_send_cmplt;
                }
                netdev_err(ndev, "unable to send receive "
                           "completion pkt (tid %llx)...give up retrying\n",
                           transaction_id);
        } else if (ret != 0) {
                netdev_err(ndev, "unable to send receive "
                           "completion pkt - %llx\n", transaction_id);
        }
}
static void netvsc_receive(struct netvsc_device *net_device,
                           struct vmbus_channel *channel,
                           struct hv_device *device,
                           struct vmpacket_descriptor *packet)
{
        struct vmtransfer_page_packet_header *vmxferpage_packet;
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet nv_pkt;
        struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
        struct net_device *ndev;

        ndev = net_device->ndev;

        /*
         * All inbound packets other than send completion should be xfer page
         * packet.
         */
        if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
                netdev_err(ndev, "Unknown packet type received - %d\n",
                           packet->type);
                return;
        }

        nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
                                              (packet->offset8 << 3));

        /* Make sure this is a valid nvsp packet */
        if (nvsp_packet->hdr.msg_type !=
            NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
                netdev_err(ndev, "Unknown nvsp packet type received-"
                           " %d\n", nvsp_packet->hdr.msg_type);
                return;
        }

        vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

        if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
                netdev_err(ndev, "Invalid xfer page set id - "
                           "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
                           vmxferpage_packet->xfer_pageset_id);
                return;
        }

        count = vmxferpage_packet->range_cnt;
        netvsc_packet->device = device;
        netvsc_packet->channel = channel;

        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
                /* Initialize the netvsc packet */
                netvsc_packet->status = NVSP_STAT_SUCCESS;
                netvsc_packet->data = (void *)((unsigned long)net_device->
                        recv_buf + vmxferpage_packet->ranges[i].byte_offset);
                netvsc_packet->total_data_buflen =
                        vmxferpage_packet->ranges[i].byte_count;

                /* Pass it to the upper layer */
                rndis_filter_receive(device, netvsc_packet);

                if (netvsc_packet->status != NVSP_STAT_SUCCESS)
                        status = NVSP_STAT_FAIL;
        }

        netvsc_send_recv_completion(device, channel, net_device,
                                    vmxferpage_packet->d.trans_id, status);
}
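
/*
 * Cache the send indirection table pushed by the host for virtual RSS;
 * the transmit path uses it to map a flow hash to an output queue.
 */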
static void netvsc_send_table(struct hv_device *hdev,
                              struct vmpacket_descriptor *vmpkt)
{
        struct netvsc_device *nvscdev;
        struct net_device *ndev;
        struct nvsp_message *nvmsg;
        int i;
        u32 count, *tab;

        nvscdev = get_outbound_net_device(hdev);
        if (!nvscdev)
                return;
        ndev = nvscdev->ndev;

        nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
                                        (vmpkt->offset8 << 3));

        if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
                return;

        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
                return;
        }

        tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
                      nvmsg->msg.v5_msg.send_table.offset);

        for (i = 0; i < count; i++)
                nvscdev->send_table[i] = tab[i];
}
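
/*
 * Per-channel callback: drain the inbound ring and dispatch each packet
 * by type. The per-channel scratch buffer starts at NETVSC_PACKET_SIZE
 * and is temporarily grown (GFP_ATOMIC) when vmbus_recvpacket_raw()
 * reports -ENOBUFS for an oversized packet.
 */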
void netvsc_channel_cb(void *context)
{
        int ret;
        struct vmbus_channel *channel = (struct vmbus_channel *)context;
        struct hv_device *device;
        struct netvsc_device *net_device;
        u32 bytes_recvd;
        u64 request_id;
        struct vmpacket_descriptor *desc;
        unsigned char *buffer;
        int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;

        if (channel->primary_channel != NULL)
                device = channel->primary_channel->device_obj;
        else
                device = channel->device_obj;

        net_device = get_inbound_net_device(device);
        if (!net_device)
                return;
        ndev = net_device->ndev;
        buffer = get_per_channel_state(channel);

        do {
                ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
                                           &bytes_recvd, &request_id);
                if (ret == 0) {
                        if (bytes_recvd > 0) {
                                desc = (struct vmpacket_descriptor *)buffer;
                                switch (desc->type) {
                                case VM_PKT_COMP:
                                        netvsc_send_completion(net_device,
                                                               device, desc);
                                        break;

                                case VM_PKT_DATA_USING_XFER_PAGES:
                                        netvsc_receive(net_device, channel,
                                                       device, desc);
                                        break;

                                case VM_PKT_DATA_INBAND:
                                        netvsc_send_table(device, desc);
                                        break;

                                default:
                                        netdev_err(ndev,
                                                   "unhandled packet type %d, "
                                                   "tid %llx len %d\n",
                                                   desc->type, request_id,
                                                   bytes_recvd);
                                        break;
                                }
                        } else {
                                /*
                                 * We are done for this pass.
                                 */
                                break;
                        }
                } else if (ret == -ENOBUFS) {
                        if (bufferlen > NETVSC_PACKET_SIZE)
                                kfree(buffer);
                        /* Handle large packet */
                        buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
                        if (buffer == NULL) {
                                /* Try again next time around */
                                netdev_err(ndev,
                                           "unable to allocate buffer of size "
                                           "(%d)!!\n", bytes_recvd);
                                break;
                        }

                        bufferlen = bytes_recvd;
                }
        } while (1);

        if (bufferlen > NETVSC_PACKET_SIZE)
                kfree(buffer);
}
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
        int ret = 0;
        int ring_size =
                ((struct netvsc_device_info *)additional_info)->ring_size;
        struct netvsc_device *net_device;
        struct net_device *ndev;

        net_device = alloc_net_device(device);
        if (!net_device)
                return -ENOMEM;

        net_device->ring_size = ring_size;

        /*
         * Coming into this function, struct net_device * is
         * registered as the driver private data.
         * In alloc_net_device(), we register struct netvsc_device *
         * as the driver private data and stash away struct net_device *
         * in struct netvsc_device *.
         */
        ndev = net_device->ndev;

        /* Initialize the NetVSC channel extension */
        init_completion(&net_device->channel_init_wait);

        set_per_channel_state(device->channel, net_device->cb_buffer);

        /* Open the channel */
        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
                         ring_size * PAGE_SIZE, NULL, 0,
                         netvsc_channel_cb, device->channel);
        if (ret != 0) {
                netdev_err(ndev, "unable to open channel: %d\n", ret);
                goto cleanup;
        }

        /* Channel is opened */
        pr_info("hv_netvsc channel opened successfully\n");

        net_device->chn_table[0] = device->channel;

        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to connect to NetVSP - %d\n", ret);
                goto close;
        }

        return ret;

close:
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

cleanup:
        kfree(net_device);

        return ret;
}