if (r < 0)
pr_warning("virtio_blk__exit() failed with error %d\n", r);
- virtio_rng__delete_all(kvm);
+ r = virtio_rng__exit(kvm);
+ if (r < 0)
+ pr_warning("virtio_rng__exit() failed with error %d\n", r);
r = disk_image__close_all(kvm->disks, image_count);
if (r < 0)
if (r < 0)
pr_warning("pci__exit() failed with error %d\n", r);
- kvm__delete(kvm);
+ r = kvm__exit(kvm);
+ if (r < 0)
+ pr_warning("pci__exit() failed with error %d\n", r);
if (guest_ret == 0)
printf("\n # KVM session ended normally.\n");
int kvm__recommended_cpus(struct kvm *kvm);
int kvm__max_cpus(struct kvm *kvm);
void kvm__init_ram(struct kvm *kvm);
-void kvm__delete(struct kvm *kvm);
+int kvm__exit(struct kvm *kvm);
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
const char *initrd_filename, const char *kernel_cmdline, u16 vidmode);
void kvm__start_timer(struct kvm *kvm);
void kvm__irq_trigger(struct kvm *kvm, int irq);
bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
-void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr);
-bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, bool coalesce,
+int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr);
+int kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, bool coalesce,
void (*mmio_fn)(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr),
void *ptr);
bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
int virtio_pci__init(struct kvm *kvm, struct virtio_trans *vtrans, void *dev,
int device_id, int subsys_id, int class);
+int virtio_pci__exit(struct kvm *kvm, struct virtio_trans *vtrans);
int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_trans *vtrans, u32 vq);
int virtio_pci__signal_config(struct kvm *kvm, struct virtio_trans *vtrans);
struct kvm;
-void virtio_rng__init(struct kvm *kvm);
-void virtio_rng__delete_all(struct kvm *kvm);
+int virtio_rng__init(struct kvm *kvm);
+int virtio_rng__exit(struct kvm *kvm);
#endif /* KVM__RNG_VIRTIO_H */
struct virtio_trans_ops {
int (*init)(struct kvm *kvm, struct virtio_trans *vtrans, void *dev, int device_id,
int subsys_id, int class);
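+ /* Counterpart of init: lets the transport release whatever init registered for the device. */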
+ int (*uninit)(struct kvm *kvm, struct virtio_trans *vtrans);
int (*signal_vq)(struct kvm *kvm, struct virtio_trans *virtio_trans, u32 queueid);
int (*signal_config)(struct kvm *kvm, struct virtio_trans *virtio_trans);
};
int ioport__register(u16 port, struct ioport_operations *ops, int count, void *param)
{
struct ioport *entry;
+ int r;
br_write_lock();
if (port == IOPORT_EMPTY)
.priv = param,
};
- ioport_insert(&ioport_tree, entry);
-
+ r = ioport_insert(&ioport_tree, entry);
+ if (r < 0) {
+ free(entry);
+ br_write_unlock();
+ return r;
+ }
br_write_unlock();
return port;
#include "kvm/kvm-ipc.h"
#include <linux/kvm.h>
+#include <linux/err.h>
#include <sys/un.h>
#include <sys/stat.h>
static char kvm_dir[PATH_MAX];
-static void set_dir(const char *fmt, va_list args)
+static int set_dir(const char *fmt, va_list args)
{
char tmp[PATH_MAX];
mkdir(tmp, 0777);
if (!realpath(tmp, kvm_dir))
- die("Unable to set KVM tool directory");
+ return -errno;
strcat(kvm_dir, "/");
+
+ return 0;
}
void kvm__set_dir(const char *fmt, ...)
static int kvm__check_extensions(struct kvm *kvm)
{
- unsigned int i;
+ int i;
for (i = 0; ; i++) {
if (!kvm_req_ext[i].name)
if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
pr_err("Unsuppored KVM extension detected: %s",
kvm_req_ext[i].name);
- return (int)-i;
+ return -ENOSYS;
}
}
static struct kvm *kvm__new(void)
{
- struct kvm *kvm = calloc(1, sizeof *kvm);
+ struct kvm *kvm = calloc(1, sizeof(*kvm));
if (!kvm)
- die("out of memory");
+ return ERR_PTR(-ENOMEM);
return kvm;
}
int len, r;
if (!kvm->name)
- return -1;
+ return -EINVAL;
sprintf(full_name, "%s/%s%s", kvm__get_dir(), kvm->name,
KVM_SOCK_SUFFIX);
- if (access(full_name, F_OK) == 0)
- die("Socket file %s already exist", full_name);
+ if (access(full_name, F_OK) == 0) {
+ pr_err("Socket file %s already exist", full_name);
+ return -EEXIST;
+ }
s = socket(AF_UNIX, SOCK_STREAM, 0);
if (s < 0)
fail:
close(s);
- return -1;
+ return r;
}
void kvm__remove_socket(const char *name)
/* Tell the user clean ghost socket file */
pr_err("\"%s\" could be a ghost socket file, please remove it",
sock_file);
- return -1;
+ return r;
} else if (r < 0) {
- die("Failed connecting to instance");
+ return r;
}
return s;
dir = opendir(kvm__get_dir());
if (!dir)
- return -1;
+ return -errno;
for (;;) {
readdir_r(dir, &entry, &result);
return ret;
}
-void kvm__delete(struct kvm *kvm)
+int kvm__exit(struct kvm *kvm)
{
kvm__stop_timer(kvm);
kvm_ipc__stop();
kvm__remove_socket(kvm->name);
free(kvm);
+
+ return 0;
}
/*
* memory regions to it. Therefore, be careful if you use this function for
* registering memory regions for emulating hardware.
*/
-void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
+int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
{
struct kvm_userspace_memory_region mem;
int ret;
ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
if (ret < 0)
- die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
+ return -errno;
+
+ return 0;
}
int kvm__recommended_cpus(struct kvm *kvm)
struct kvm *kvm;
int ret;
- if (!kvm__arch_cpu_supports_vm())
- die("Your CPU does not support hardware virtualization");
+ if (!kvm__arch_cpu_supports_vm()) {
+ pr_err("Your CPU does not support hardware virtualization");
+ return ERR_PTR(-ENOSYS);
+ }
kvm = kvm__new();
+ if (IS_ERR_OR_NULL(kvm))
+ return kvm;
kvm->sys_fd = open(kvm_dev, O_RDWR);
if (kvm->sys_fd < 0) {
- if (errno == ENOENT)
- die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
- if (errno == ENODEV)
- die("'%s' KVM driver not available.\n # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);
-
- fprintf(stderr, " Fatal, could not open %s: ", kvm_dev);
- perror(NULL);
- exit(1);
+ ret = -errno;
+ if (errno == ENOENT) {
+ pr_err("'%s' not found. Please make sure your kernel has CONFIG_KVM "
+ "enabled and that the KVM modules are loaded.", kvm_dev);
+ goto cleanup;
+ }
+ if (errno == ENODEV) {
+ pr_err("'%s' KVM driver not available.\n # (If the KVM "
+ "module is loaded then 'dmesg' may offer further clues "
+ "about the failure.)", kvm_dev);
+ goto cleanup;
+ }
+
+ pr_err("Could not open %s", kvm_dev);
+ goto cleanup;
}
ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
- if (ret != KVM_API_VERSION)
- die_perror("KVM_API_VERSION ioctl");
+ if (ret != KVM_API_VERSION) {
+ pr_err("KVM_API_VERSION ioctl");
+ ret = ret < 0 ? -errno : -ENOSYS;
+ goto cleanup;
+ }
kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
- if (kvm->vm_fd < 0)
- die_perror("KVM_CREATE_VM ioctl");
+ if (kvm->vm_fd < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
- if (kvm__check_extensions(kvm))
- die("A required KVM extention is not supported by OS");
+ if (kvm__check_extensions(kvm)) {
+ pr_err("A required KVM extension is not supported by the OS");
+ ret = -ENOSYS;
+ goto cleanup;
+ }
kvm__arch_init(kvm, hugetlbfs_path, ram_size);
kvm_ipc__start(kvm__create_socket(kvm));
kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);
return kvm;
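+ /* Error path: close any file descriptors we opened, free the kvm struct and report the failure via ERR_PTR(ret). */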
+cleanup:
+ if (kvm->vm_fd > 0)
+ close(kvm->vm_fd);
+ if (kvm->sys_fd > 0)
+ close(kvm->sys_fd);
+ free(kvm);
+
+ return ERR_PTR(ret);
}
/* RFC 1952 */
#include <linux/kvm.h>
#include <linux/types.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
+#include <errno.h>
#define mmio_node(n) rb_entry(n, struct mmio_mapping, node)
return "read";
}
-bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, bool coalesce,
+int kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, bool coalesce,
void (*mmio_fn)(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr),
void *ptr)
{
mmio = malloc(sizeof(*mmio));
if (mmio == NULL)
- return false;
+ return -ENOMEM;
*mmio = (struct mmio_mapping) {
.node = RB_INT_INIT(phys_addr, phys_addr + phys_addr_len),
ret = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
if (ret < 0) {
free(mmio);
- return false;
+ return -errno;
}
}
br_write_lock();
#include <kvm/rbtree-interval.h>
#include <stddef.h>
+#include <errno.h>
struct rb_int_node *rb_int_search_single(struct rb_root *root, u64 point)
{
int rb_int_insert(struct rb_root *root, struct rb_int_node *i_node)
{
struct rb_node **node = &(root->rb_node), *parent = NULL;
while (*node) {
int result = i_node->low - rb_int(*node)->low;
parent = *node;
if (result < 0)
else if (result > 0)
node = &((*node)->rb_right);
else
- return 0;
+ return -EEXIST;
}
rb_link_node(&i_node->node, parent, node);
rb_insert_color(&i_node->node, root);
rb_augment_insert(&i_node->node, update_node_max_high, NULL);
- return 1;
+ return 0;
}
void rb_int_erase(struct rb_root *root, struct rb_int_node *node)
.signal_vq = virtio_pci__signal_vq,
.signal_config = virtio_pci__signal_config,
.init = virtio_pci__init,
+ .uninit = virtio_pci__exit,
};
return &virtio_pci_trans;
};
return r;
vpci->base_addr = (u16)r;
- kvm__register_mmio(kvm, vpci->msix_io_block, PCI_IO_SIZE, false, callback_mmio_table, vpci);
+ r = kvm__register_mmio(kvm, vpci->msix_io_block, PCI_IO_SIZE, false, callback_mmio_table, vpci);
+ if (r < 0)
+ goto free_ioport;
vpci->pci_hdr = (struct pci_device_header) {
.vendor_id = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET),
vpci->pci_hdr.msix.pba_offset = cpu_to_le32(1 | PCI_IO_SIZE); /* Use BAR 3 */
vpci->config_vector = 0;
- if (irq__register_device(subsys_id, &ndev, &pin, &line) < 0)
- return -1;
+ r = irq__register_device(subsys_id, &ndev, &pin, &line);
+ if (r < 0)
+ goto free_mmio;
vpci->pci_hdr.irq_pin = pin;
vpci->pci_hdr.irq_line = line;
- pci__register(&vpci->pci_hdr, ndev);
+ r = pci__register(&vpci->pci_hdr, ndev);
+ if (r < 0)
+ goto free_mmio;
return 0;
+
+free_mmio:
+ kvm__deregister_mmio(kvm, vpci->msix_io_block);
+free_ioport:
+ ioport__unregister(vpci->base_addr);
+ return r;
}
+
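+/*
+ * Mirror of virtio_pci__init(): drop the MMIO window, the I/O port range
+ * and the per-virtqueue notify ioeventfds used by this transport.
+ */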
+int virtio_pci__exit(struct kvm *kvm, struct virtio_trans *vtrans)
+{
+ struct virtio_pci *vpci = vtrans->virtio;
+ int i;
+
+ kvm__deregister_mmio(kvm, vpci->msix_io_block);
+ ioport__unregister(vpci->base_addr);
+
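+ /* Remove the queue notification ioeventfds for every possible virtqueue. */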
+ for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++)
+ ioeventfd__del_event(vpci->base_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
+
+ return 0;
+}
\ No newline at end of file
.get_size_vq = get_size_vq,
};
-void virtio_rng__init(struct kvm *kvm)
+int virtio_rng__init(struct kvm *kvm)
{
struct rng_dev *rdev;
+ int r;
rdev = malloc(sizeof(*rdev));
if (rdev == NULL)
- return;
+ return -ENOMEM;
rdev->fd = open("/dev/urandom", O_RDONLY);
- if (rdev->fd < 0)
- die("Failed initializing RNG");
+ if (rdev->fd < 0) {
+ r = -errno;
+ goto free_rdev;
+ }
- virtio_trans_init(&rdev->vtrans, VIRTIO_PCI);
+ r = virtio_trans_init(&rdev->vtrans, VIRTIO_PCI);
+ if (r < 0)
+ goto free_rdev;
+
- rdev->vtrans.trans_ops->init(kvm, &rdev->vtrans, rdev, PCI_DEVICE_ID_VIRTIO_RNG,
+ r = rdev->vtrans.trans_ops->init(kvm, &rdev->vtrans, rdev, PCI_DEVICE_ID_VIRTIO_RNG,
VIRTIO_ID_RNG, PCI_CLASS_RNG);
+ if (r < 0)
+ goto cleanup;
+
rdev->vtrans.virtio_ops = &rng_dev_virtio_ops;
list_add_tail(&rdev->list, &rdevs);
"Please make sure that the guest kernel was "
"compiled with CONFIG_HW_RANDOM_VIRTIO=y enabled "
"in its .config");
+ return 0;
+cleanup:
+ close(rdev->fd);
+free_rdev:
+ free(rdev);
+
+ return r;
}
-void virtio_rng__delete_all(struct kvm *kvm)
+int virtio_rng__exit(struct kvm *kvm)
{
struct rng_dev *rdev, *tmp;
list_for_each_entry_safe(rdev, tmp, &rdevs, list) {
list_del(&rdev->list);
+ rdev->vtrans.trans_ops->uninit(kvm, &rdev->vtrans);
+ close(rdev->fd);
free(rdev);
}
+
+ return 0;
}
case VIRTIO_PCI:
trans = calloc(sizeof(struct virtio_pci), 1);
if (!trans)
- die("Failed allocating virtio transport");
+ return -ENOMEM;
vtrans->virtio = trans;
vtrans->trans_ops = virtio_pci__get_trans_ops();
default:
return -1;
};
+
+ return 0;
}
\ No newline at end of file
struct rb_node *node = root->rb_node;
while (node) {
- struct pci_dev *data = container_of(node, struct pci_dev, node);
+ struct pci_dev *data = rb_entry(node, struct pci_dev, node);
int result;
result = id - data->id;
int irq__exit(struct kvm *kvm)
{
+ struct rb_node *ent;
+
free(irq_routing);
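+
+ /* Tear down the PCI device tree: free each device's IRQ lines, then the device itself. */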
+ while ((ent = rb_first(&pci_tree))) {
+ struct pci_dev *dev;
+ struct irq_line *line;
+ struct list_head *node, *tmp;
+
+ dev = rb_entry(ent, struct pci_dev, node);
+ list_for_each_safe(node, tmp, &dev->lines) {
+ line = list_entry(node, struct irq_line, node);
+ free(line);
+ }
+ rb_erase(&dev->node, &pci_tree);
+ free(dev);
+ }
+
return 0;
}