#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio-net.h"
#include "kvm/virtio.h"
-#include "kvm/ioport.h"
#include "kvm/types.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
-#include "kvm/pci.h"
#include "kvm/irq.h"
#include "kvm/uip.h"
#include "kvm/ioeventfd.h"
#include "kvm/guest_compat.h"
+#include "kvm/virtio-pci.h"
#include <linux/virtio_net.h>
#include <linux/if_tun.h>
+#include <linux/types.h>
#include <arpa/inet.h>
#include <net/if.h>
#define VIRTIO_NET_RX_QUEUE 0
#define VIRTIO_NET_TX_QUEUE 1
-static struct pci_device_header pci_header = {
- .vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET,
- .device_id = PCI_DEVICE_ID_VIRTIO_NET,
- .header_type = PCI_HEADER_TYPE_NORMAL,
- .revision_id = 0,
- .class = 0x020000,
- .subsys_vendor_id = PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
- .subsys_id = VIRTIO_ID_NET,
-};
-
struct net_dev;
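+/* Global KVM instance; used below when registering with the virtio-pci transport. */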
+extern struct kvm *kvm;
+
struct net_dev_operations {
int (*rx)(struct iovec *iov, u16 in, struct net_dev *ndev);
int (*tx)(struct iovec *iov, u16 in, struct net_dev *ndev);
struct net_dev {
pthread_mutex_t mutex;
+ struct virtio_pci vpci;
struct virt_queue vqs[VIRTIO_NET_NUM_QUEUES];
struct virtio_net_config config;
- u32 host_features;
- u32 guest_features;
- u16 config_vector;
- u8 status;
- u8 isr;
- u16 queue_selector;
- u16 base_addr;
- u32 vq_vector[VIRTIO_NET_NUM_QUEUES];
- u32 gsis[VIRTIO_NET_NUM_QUEUES];
- u32 msix_io_block;
+ u32 features;
int compat_id;
- bool msix_enabled;
pthread_t io_rx_thread;
pthread_mutex_t io_rx_lock;
.config = {
.status = VIRTIO_NET_S_LINK_UP,
},
- .host_features = 1UL << VIRTIO_NET_F_MAC
- | 1UL << VIRTIO_NET_F_CSUM
- | 1UL << VIRTIO_NET_F_HOST_UFO
- | 1UL << VIRTIO_NET_F_HOST_TSO4
- | 1UL << VIRTIO_NET_F_HOST_TSO6
- | 1UL << VIRTIO_NET_F_GUEST_UFO
- | 1UL << VIRTIO_NET_F_GUEST_TSO4
- | 1UL << VIRTIO_NET_F_GUEST_TSO6,
.info = {
.buf_nr = 20,
}
virt_queue__set_used_elem(vq, head, len);
/* We should interrupt guest right now, otherwise latency is huge. */
- kvm__irq_trigger(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE]);
+ virtio_pci__signal_vq(kvm, &ndev.vpci, VIRTIO_NET_RX_QUEUE);
}
}
virt_queue__set_used_elem(vq, head, len);
}
- kvm__irq_trigger(kvm, ndev.gsis[VIRTIO_NET_TX_QUEUE]);
+ virtio_pci__signal_vq(kvm, &ndev.vpci, VIRTIO_NET_TX_QUEUE);
}
pthread_exit(NULL);
}
-static bool virtio_net_pci_io_device_specific_out(struct kvm *kvm, void *data,
- unsigned long offset, int size)
-{
- u8 *config_space = (u8 *)&ndev.config;
- int type;
- u32 config_offset;
-
- type = virtio__get_dev_specific_field(offset - 20, ndev.msix_enabled, 0, &config_offset);
- if (type == VIRTIO_PCI_O_MSIX) {
- if (offset == VIRTIO_MSI_CONFIG_VECTOR) {
- ndev.config_vector = ioport__read16(data);
- } else {
- u32 gsi;
- u32 vec;
-
- vec = ndev.vq_vector[ndev.queue_selector] = ioport__read16(data);
-
- gsi = irq__add_msix_route(kvm,
- pci_header.msix.table[vec].low,
- pci_header.msix.table[vec].high,
- pci_header.msix.table[vec].data);
-
- ndev.gsis[ndev.queue_selector] = gsi;
- }
- return true;
- }
-
- if (size != 1)
- return false;
-
- if ((config_offset) > sizeof(struct virtio_net_config))
- pr_error("config offset is too big: %u", config_offset);
-
- config_space[config_offset] = *(u8 *)data;
-
- return true;
-}
-
-static bool virtio_net_pci_io_device_specific_in(void *data, unsigned long offset, int size)
-{
- u8 *config_space = (u8 *)&ndev.config;
- int type;
- u32 config_offset;
-
- type = virtio__get_dev_specific_field(offset - 20, ndev.msix_enabled, 0, &config_offset);
- if (type == VIRTIO_PCI_O_MSIX) {
- if (offset == VIRTIO_MSI_CONFIG_VECTOR)
- ioport__write16(data, ndev.config_vector);
- else
- ioport__write16(data, ndev.vq_vector[ndev.queue_selector]);
-
- return true;
- }
-
- if (size != 1)
- return false;
-
- if ((config_offset) > sizeof(struct virtio_net_config))
- pr_error("config offset is too big: %u", config_offset);
-
- ioport__write8(data, config_space[config_offset]);
-
- return true;
-}
-
-static bool virtio_net_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
-{
- unsigned long offset = port - ndev.base_addr;
- bool ret = true;
-
- mutex_lock(&ndev.mutex);
-
- switch (offset) {
- case VIRTIO_PCI_HOST_FEATURES:
- ioport__write32(data, ndev.host_features);
- break;
- case VIRTIO_PCI_GUEST_FEATURES:
- ret = false;
- break;
- case VIRTIO_PCI_QUEUE_PFN:
- ioport__write32(data, ndev.vqs[ndev.queue_selector].pfn);
- break;
- case VIRTIO_PCI_QUEUE_NUM:
- ioport__write16(data, VIRTIO_NET_QUEUE_SIZE);
- break;
- case VIRTIO_PCI_QUEUE_SEL:
- case VIRTIO_PCI_QUEUE_NOTIFY:
- ret = false;
- break;
- case VIRTIO_PCI_STATUS:
- ioport__write8(data, ndev.status);
- break;
- case VIRTIO_PCI_ISR:
- ioport__write8(data, ndev.isr);
- kvm__irq_line(kvm, pci_header.irq_line, VIRTIO_IRQ_LOW);
- ndev.isr = VIRTIO_IRQ_LOW;
- break;
- default:
- ret = virtio_net_pci_io_device_specific_in(data, offset, size);
- };
-
- mutex_unlock(&ndev.mutex);
-
- return ret;
-}
-
static void virtio_net_handle_callback(struct kvm *kvm, u16 queue_index)
{
switch (queue_index) {
}
}
-static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
-{
- unsigned long offset = port - ndev.base_addr;
- bool ret = true;
-
- mutex_lock(&ndev.mutex);
-
- switch (offset) {
- case VIRTIO_PCI_GUEST_FEATURES:
- ndev.guest_features = ioport__read32(data);
- break;
- case VIRTIO_PCI_QUEUE_PFN: {
- struct virt_queue *queue;
- void *p;
-
- assert(ndev.queue_selector < VIRTIO_NET_NUM_QUEUES);
-
- compat__remove_message(ndev.compat_id);
-
- queue = &ndev.vqs[ndev.queue_selector];
- queue->pfn = ioport__read32(data);
- p = guest_pfn_to_host(kvm, queue->pfn);
-
- vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
-
- break;
- }
- case VIRTIO_PCI_QUEUE_SEL:
- ndev.queue_selector = ioport__read16(data);
- break;
- case VIRTIO_PCI_QUEUE_NOTIFY: {
- u16 queue_index;
-
- queue_index = ioport__read16(data);
- virtio_net_handle_callback(kvm, queue_index);
- break;
- }
- case VIRTIO_PCI_STATUS:
- ndev.status = ioport__read8(data);
- break;
- default:
- ret = virtio_net_pci_io_device_specific_out(kvm, data, offset, size);
- };
-
- mutex_unlock(&ndev.mutex);
-
- return ret;
-}
-
static void ioevent_callback(struct kvm *kvm, void *param)
{
virtio_net_handle_callback(kvm, (u64)(long)param);
}
-static struct ioport_operations virtio_net_io_ops = {
- .io_in = virtio_net_pci_io_in,
- .io_out = virtio_net_pci_io_out,
-};
-
-static void callback_mmio(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
-{
- void *table = pci_header.msix.table;
- if (is_write)
- memcpy(table + addr - ndev.msix_io_block, data, len);
- else
- memcpy(data, table + addr - ndev.msix_io_block, len);
-
- ndev.msix_enabled = 1;
-}
-
static bool virtio_net__tap_init(const struct virtio_net_parameters *params)
{
int sock = socket(AF_INET, SOCK_STREAM, 0);
.tx = uip_ops_tx,
};
-void virtio_net__init(const struct virtio_net_parameters *params)
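+/*
+ * Device-specific config space accessors, called by the virtio-pci
+ * transport: they read and write single bytes of struct virtio_net_config.
+ */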
+static void set_config(struct kvm *kvm, void *dev, u8 data, u32 offset)
+{
+ struct net_dev *ndev = dev;
+
+ ((u8 *)(&ndev->config))[offset] = data;
+}
+
+static u8 get_config(struct kvm *kvm, void *dev, u32 offset)
+{
+ struct net_dev *ndev = dev;
+
+ return ((u8 *)(&ndev->config))[offset];
+}
+
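+/* Feature bits offered to the guest; the set the guest acks comes back via set_guest_features(). */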
+static u32 get_host_features(struct kvm *kvm, void *dev)
+{
+ return 1UL << VIRTIO_NET_F_MAC
+ | 1UL << VIRTIO_NET_F_CSUM
+ | 1UL << VIRTIO_NET_F_HOST_UFO
+ | 1UL << VIRTIO_NET_F_HOST_TSO4
+ | 1UL << VIRTIO_NET_F_HOST_TSO6
+ | 1UL << VIRTIO_NET_F_GUEST_UFO
+ | 1UL << VIRTIO_NET_F_GUEST_TSO4
+ | 1UL << VIRTIO_NET_F_GUEST_TSO6;
+}
+
+static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
+{
+ struct net_dev *ndev = dev;
+
+ ndev->features = features;
+}
+
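+/*
+ * The guest has written a queue's PFN: map it into host memory, initialize
+ * the vring and register an ioeventfd for kicks on that queue.
+ */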
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
{
+ struct net_dev *ndev = dev;
+ struct virt_queue *queue;
+ void *p;
struct ioevent ioevent;
- u8 dev, line, pin;
- u16 net_base_addr;
- int i;
- if (irq__register_device(VIRTIO_ID_NET, &dev, &pin, &line) < 0)
- return;
+ compat__remove_message(ndev->compat_id);
+
+ queue = &ndev->vqs[vq];
+ queue->pfn = pfn;
+ p = guest_pfn_to_host(kvm, queue->pfn);
+
+ vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+
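+ /* Guest writes of this queue index to VIRTIO_PCI_QUEUE_NOTIFY are matched
+  * in-kernel and signalled through an eventfd instead of a full I/O exit. */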
+ ioevent = (struct ioevent) {
+ .io_addr = ndev->vpci.base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
+ .io_len = sizeof(u16),
+ .fn = ioevent_callback,
+ .fn_ptr = (void *)(u64)vq,
+ .datamatch = vq,
+ .fn_kvm = kvm,
+ .fd = eventfd(0, 0),
+ };
+
+ ioeventfd__add_event(&ioevent);
+
+ return 0;
+}
- pci_header.irq_pin = pin;
- pci_header.irq_line = line;
- net_base_addr = ioport__register(IOPORT_EMPTY, &virtio_net_io_ops, IOPORT_SIZE, NULL);
- pci_header.bar[0] = net_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
- ndev.base_addr = net_base_addr;
- pci__register(&pci_header, dev);
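+/* Called by the virtio-pci transport when the guest kicks queue 'vq'. */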
+static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+ virtio_net_handle_callback(kvm, vq);
+
+ return 0;
+}
+
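+/* Report per-queue PFN and size back to the transport's common register handling. */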
+static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+ struct net_dev *ndev = dev;
+
+ return ndev->vqs[vq].pfn;
+}
+
+static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+ return VIRTIO_NET_QUEUE_SIZE;
+}
+
+void virtio_net__init(const struct virtio_net_parameters *params)
+{
+ int i;
for (i = 0 ; i < 6 ; i++) {
ndev.config.mac[i] = params->guest_mac[i];
ndev.ops = &uip_ops;
}
- ndev.msix_io_block = pci_get_io_space_block();
- kvm__register_mmio(params->kvm, ndev.msix_io_block, 0x100, callback_mmio, NULL);
- pci_header.bar[1] = ndev.msix_io_block |
- PCI_BASE_ADDRESS_SPACE_MEMORY |
- PCI_BASE_ADDRESS_MEM_TYPE_64;
- /* bar[2] is the continuation of bar[1] for 64bit addressing */
- pci_header.bar[2] = 0;
- pci_header.status = PCI_STATUS_CAP_LIST;
- pci_header.capabilities = (void *)&pci_header.msix - (void *)&pci_header;
-
- pci_header.msix.cap = PCI_CAP_ID_MSIX;
- pci_header.msix.next = 0;
- pci_header.msix.table_size = (VIRTIO_NET_NUM_QUEUES + 1) | PCI_MSIX_FLAGS_ENABLE;
- pci_header.msix.table_offset = 1; /* Use BAR 1 */
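+ /*
+  * Register through the generic virtio-pci transport, which now owns the
+  * PCI header, BARs, MSI-X table and I/O port handling that used to live
+  * in this file.
+  */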
+ virtio_pci__init(kvm, &ndev.vpci, &ndev, PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET);
+ ndev.vpci.ops = (struct virtio_pci_ops) {
+ .set_config = set_config,
+ .get_config = get_config,
+ .get_host_features = get_host_features,
+ .set_guest_features = set_guest_features,
+ .init_vq = init_vq,
+ .notify_vq = notify_vq,
+ .get_pfn_vq = get_pfn_vq,
+ .get_size_vq = get_size_vq,
+ };
virtio_net__io_thread_init(params->kvm);
- for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
- ioevent = (struct ioevent) {
- .io_addr = net_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
- .io_len = sizeof(u16),
- .fn = ioevent_callback,
- .datamatch = i,
- .fn_ptr = (void *)(long)i,
- .fn_kvm = params->kvm,
- .fd = eventfd(0, 0),
- };
-
- ioeventfd__add_event(&ioevent);
- }
-
ndev.compat_id = compat__add_message("virtio-net device was not detected",
"While you have requested a virtio-net device, "
"the guest kernel didn't seem to detect it.\n"