csize &= (q->cluster_size - 1);
if (pread_in_full(q->fd, q->cluster_data, csize,
- coffset) < 0) {
+ coffset) < 0)
goto out_error;
- }
if (qcow_decompress_buffer(q->cluster_cache, q->cluster_size,
- q->cluster_data, csize) < 0) {
+ q->cluster_data, csize) < 0)
goto out_error;
- }
memcpy(dst, q->cluster_cache + clust_offset, length);
mutex_unlock(&q->mutex);
- } else{
+ } else {
if (!clust_start)
goto zero_cluster;
memcpy(dst, q->cluster_cache + clust_offset, length);
mutex_unlock(&q->mutex);
- } else{
+ } else {
clust_start &= QCOW2_OFFSET_MASK;
if (!clust_start)
goto zero_cluster;
char *buf;
u32 nr;
- buf = dst;
- nr_read = 0;
+ buf = dst;
+ nr_read = 0;
while (nr_read < dst_len) {
- offset = sector << SECTOR_SHIFT;
+ offset = sector << SECTOR_SHIFT;
if (offset >= header->size)
return -1;
if (nr <= 0)
return -1;
- nr_read += nr;
- buf += nr;
- sector += (nr >> SECTOR_SHIFT);
+ nr_read += nr;
+ buf += nr;
+ sector += (nr >> SECTOR_SHIFT);
}
return dst_len;
return -1;
}
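/*
 * Arithmetic sketch (illustrative; kvmtool's SECTOR_SHIFT is 9): sector 9
 * starts at byte offset 9 << 9 = 0x1200, which with 4 KiB clusters falls in
 * cluster 1 at intra-cluster offset 0x200. A single request can therefore
 * straddle cluster boundaries, hence the nr_read accumulation loop above.
 */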
- sector += iov->iov_len >> SECTOR_SHIFT;
+ sector += iov->iov_len >> SECTOR_SHIFT;
+ total += nr;
iov++;
- total += nr;
}
return total;
ops = entry->ops;
while (count--) {
- if (direction == KVM_EXIT_IO_IN) {
- if (ops->io_in)
+ if (direction == KVM_EXIT_IO_IN && ops->io_in)
ret = ops->io_in(entry, kvm, port, ptr, size);
- } else {
- if (ops->io_out)
+ else if (direction == KVM_EXIT_IO_OUT && ops->io_out)
ret = ops->io_out(entry, kvm, port, ptr, size);
- }
ptr += size;
}
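/*
 * Note on the loop: count > 1 corresponds to x86 string I/O (rep ins/outs),
 * where the guest hits the same port repeatedly while the data pointer
 * advances through its buffer in size-byte steps, as ptr += size does here.
 */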
void *p = pci_devices[dev_num];
memcpy(data, p + offset, size);
- } else
+ } else {
memset(data, 0x00, size);
- } else
+ }
+ } else {
memset(data, 0xff, size);
+ }
}
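/*
 * The two fill patterns mirror real PCI behaviour: configuration reads from
 * an absent device return all-ones (the visible effect of a master abort),
 * which is exactly how guests probe for device presence, while reads beyond
 * a present device's header are returned as zeroes.
 */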
void pci__register(struct pci_device_header *dev, u8 dev_num)
#include <stdio.h>
#include <bfd.h>
-static bfd *abfd;
+static bfd *abfd;
void symbol__init(const char *vmlinux)
{
bfd_init();
- abfd = bfd_openr(vmlinux, NULL);
+ abfd = bfd_openr(vmlinux, NULL);
}
static asymbol *lookup(asymbol **symbols, int nr_symbols, const char *symbol_name)
if (!bfd_check_format(abfd, bfd_object))
goto not_found;
- symtab_size = bfd_get_symtab_upper_bound(abfd);
+ symtab_size = bfd_get_symtab_upper_bound(abfd);
if (!symtab_size)
goto not_found;
- syms = malloc(symtab_size);
+ syms = malloc(symtab_size);
if (!syms)
goto not_found;
- nr_syms = bfd_canonicalize_symtab(abfd, syms);
+ nr_syms = bfd_canonicalize_symtab(abfd, syms);
- section = bfd_get_section_by_name(abfd, ".debug_aranges");
+ section = bfd_get_section_by_name(abfd, ".debug_aranges");
if (!section)
goto not_found;
if (!func)
goto not_found;
- symbol = lookup(syms, nr_syms, func);
+ symbol = lookup(syms, nr_syms, func);
if (!symbol)
goto not_found;
- sym_start = bfd_asymbol_value(symbol);
+ sym_start = bfd_asymbol_value(symbol);
- sym_offset = addr - sym_start;
+ sym_offset = addr - sym_start;
snprintf(sym, size, "%s+%llx (%s:%i)", func, (long long) sym_offset, filename, line);
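/* Example of the resulting string (values illustrative): "start_kernel+3f (init/main.c:543)" */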
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
-
#define TERM_FD_IN 0
#define TERM_FD_OUT 1
if (statfs(htlbfs_path, &sfs) < 0)
die("Can't stat %s\n", htlbfs_path);
- if ((unsigned int)sfs.f_type != HUGETLBFS_MAGIC) {
+ if ((unsigned int)sfs.f_type != HUGETLBFS_MAGIC)
die("%s is not hugetlbfs!\n", htlbfs_path);
- }
blk_size = (unsigned long)sfs.f_bsize;
if (sfs.f_bsize == 0 || blk_size > size) {
u16 out, in, head;
u32 *ptrs, i;
- head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
- ptrs = iov[0].iov_base;
- len = iov[0].iov_len / sizeof(u32);
+ head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
+ ptrs = iov[0].iov_base;
+ len = iov[0].iov_len / sizeof(u32);
for (i = 0; i < len; i++) {
void *guest_ptr;
desc = vq->vring.desc;
if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
-
max = desc[idx].len / sizeof(struct vring_desc);
desc = guest_flat_to_host(kvm, desc[idx].addr);
idx = 0;
vq = &ndev->vqs[VIRTIO_NET_RX_QUEUE];
while (1) {
-
mutex_lock(&ndev->io_rx_lock);
if (!virt_queue__available(vq))
pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock);
mutex_unlock(&ndev->io_rx_lock);
while (virt_queue__available(vq)) {
-
head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
-
len = ndev->ops->rx(iov, in, ndev);
-
virt_queue__set_used_elem(vq, head, len);
/* We should interrupt the guest right now, otherwise latency is huge. */
ndev->vtrans.trans_ops->signal_vq(kvm, &ndev->vtrans,
VIRTIO_NET_RX_QUEUE);
}
-
}
pthread_exit(NULL);
mutex_unlock(&ndev->io_tx_lock);
while (virt_queue__available(vq)) {
-
head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
-
len = ndev->ops->tx(iov, out, ndev);
-
virt_queue__set_used_elem(vq, head, len);
}
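/*
 * Both the rx and tx threads follow the same virtio cycle: pop a descriptor
 * chain with virt_queue__get_iov(), perform the actual I/O, publish the
 * consumed length with virt_queue__set_used_elem(), then signal the guest
 * through the transport so it can reclaim the buffers.
 */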
compat__remove_message(compat_id);
- queue = &ndev->vqs[vq];
- queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ queue = &ndev->vqs[vq];
+ queue->pfn = pfn;
+ p = guest_pfn_to_host(kvm, queue->pfn);
vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
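/*
 * Queue setup sketch: the guest hands over a page frame number, the host
 * recovers the ring's address with guest_pfn_to_host(), and vring_init()
 * lays out the descriptor table and avail/used rings at
 * VIRTIO_PCI_VRING_ALIGN boundaries inside that contiguous block.
 */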
vpci->config_gsi = gsi;
break;
- case VIRTIO_MSI_QUEUE_VECTOR: {
+ case VIRTIO_MSI_QUEUE_VECTOR:
vec = vpci->vq_vector[vpci->queue_selector] = ioport__read16(data);
gsi = irq__add_msix_route(kvm, &vpci->msix_table[vec].msg);
vtrans->virtio_ops->notify_vq_gsi(kvm, vpci->dev,
vpci->queue_selector, gsi);
break;
- }
}
return true;
vtrans->virtio_ops->init_vq(kvm, vpci->dev, vpci->queue_selector, val);
break;
case VIRTIO_PCI_QUEUE_SEL:
- vpci->queue_selector = ioport__read16(data);
+ vpci->queue_selector = ioport__read16(data);
break;
case VIRTIO_PCI_QUEUE_NOTIFY:
- val = ioport__read16(data);
+ val = ioport__read16(data);
vtrans->virtio_ops->notify_vq(kvm, vpci->dev, val);
break;
case VIRTIO_PCI_STATUS:
- vpci->status = ioport__read8(data);
+ vpci->status = ioport__read8(data);
break;
default:
ret = virtio_pci__specific_io_out(kvm, vtrans, port, data, size, offset);
unsigned int len = 0;
u16 out, in, head;
- head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
- len = readv(rdev->fd, iov, in);
+ head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
+ len = readv(rdev->fd, iov, in);
virt_queue__set_used_elem(queue, head, len);
static void virtio_rng_do_io(struct kvm *kvm, void *param)
{
- struct rng_dev_job *job = param;
- struct virt_queue *vq = job->vq;
- struct rng_dev *rdev = job->rdev;
+ struct rng_dev_job *job = param;
+ struct virt_queue *vq = job->vq;
+ struct rng_dev *rdev = job->rdev;
while (virt_queue__available(vq))
virtio_rng_do_io_request(kvm, rdev, vq);
compat__remove_message(compat_id);
- queue = &rdev->vqs[vq];
- queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ queue = &rdev->vqs[vq];
+ queue->pfn = pfn;
+ p = guest_pfn_to_host(kvm, queue->pfn);
job = &rdev->jobs[vq];
vring_init(&queue->vring, VIRTIO_RNG_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
- *job = (struct rng_dev_job) {
- .vq = queue,
- .rdev = rdev,
+ *job = (struct rng_dev_job) {
+ .vq = queue,
+ .rdev = rdev,
};
thread_pool__init_job(&job->job_id, kvm, virtio_rng_do_io, job);
void virtio_rng__delete_all(struct kvm *kvm)
{
- while (!list_empty(&rdevs)) {
- struct rng_dev *rdev;
+ struct rng_dev *rdev, *tmp;
- rdev = list_first_entry(&rdevs, struct rng_dev, list);
+ list_for_each_entry_safe(rdev, tmp, &rdevs, list) {
list_del(&rdev->list);
free(rdev);
}
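/*
 * The _safe variant matters here: list_del() plus free() invalidate the node
 * being visited, and list_for_each_entry_safe() caches the next pointer in
 * tmp before the body runs. A plain list_for_each_entry() would dereference
 * freed memory when stepping to the next element.
 */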
struct real_intr_desc intr_desc;
void *p;
- p = guest_flat_to_host(kvm, handler->address);
+ p = guest_flat_to_host(kvm, handler->address);
memcpy(p, handler->handler, handler->size);
intr_desc = (struct real_intr_desc) {
.offset = handler->address - MB_BIOS_BEGIN,
};
- DIE_IF((handler->address - MB_BIOS_BEGIN) > (unsigned long)0xffff);
+ DIE_IF((handler->address - MB_BIOS_BEGIN) > 0xffffUL);
interrupt_table__set(&kvm->interrupt_table, &intr_desc, handler->irq);
}
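/*
 * The DIE_IF bound exists because a real-mode IVT entry stores only a 16-bit
 * offset: every handler stub copied here must land within 64 KiB of
 * MB_BIOS_BEGIN for segment:offset addressing to reach it.
 */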
BUILD_BUG_ON(i > E820_X_MAX);
- e820->nr_map = i;
+ e820->nr_map = i;
}
static void setup_vga_rom(struct kvm *kvm)
switch (entry->function) {
case 6:
/* Clear presence of IA32_ENERGY_PERF_BIAS */
- entry->ecx = entry->ecx & ~(1 << 3);
+ entry->ecx = entry->ecx & ~(1 << 3);
break;
case CPUID_FUNC_PERFMON:
- entry->eax = 0x00; /* disable it */
+ entry->eax = 0x00; /* disable it */
break;
default:
/* Keep the CPUID function as-is */
{
struct kvm_cpuid2 *kvm_cpuid;
- kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) + MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));
+ kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) +
+ MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));
kvm_cpuid->nent = MAX_KVM_CPUID_ENTRIES;
if (ioctl(vcpu->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
itable->entries[i] = *entry;
}
-void interrupt_table__set(struct interrupt_table *itable, struct real_intr_desc *entry, unsigned int num)
+void interrupt_table__set(struct interrupt_table *itable,
+ struct real_intr_desc *entry, unsigned int num)
{
if (num < REAL_INTR_VECTORS)
itable->entries[num] = *entry;
/*
* KVM on Intel requires 'base' to be 'selector * 16' in real mode.
*/
- return (u32)selector * 16;
+ return (u32)selector << 4;
}
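/*
 * Illustrative helper (not part of the code above): the same base rule gives
 * the flat address of any real-mode segment:offset pair, e.g. 0x1000:0x0200
 * resolves to (0x1000 << 4) + 0x0200 = 0x10200.
 */
static inline u32 flat_addr(u16 selector, u16 offset)
{
	return ((u32)selector << 4) + offset;
}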
static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
struct kvm_cpu *vcpu;
- vcpu = calloc(1, sizeof *vcpu);
+ vcpu = calloc(1, sizeof(*vcpu));
if (!vcpu)
return NULL;
- vcpu->kvm = kvm;
+ vcpu->kvm = kvm;
return vcpu;
}
int mmap_size;
int coalesced_offset;
- vcpu = kvm_cpu__new(kvm);
+ vcpu = kvm_cpu__new(kvm);
if (!vcpu)
return NULL;
- vcpu->cpu_id = cpu_id;
+ vcpu->cpu_id = cpu_id;
vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
if (vcpu->vcpu_fd < 0)
vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_MISC_ENABLE,
MSR_IA32_MISC_ENABLE_FAST_STRING);
- vcpu->msrs->nmsrs = ndx;
+ vcpu->msrs->nmsrs = ndx;
if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
die_perror("KVM_SET_MSRS failed");
static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
vcpu->fpu = (struct kvm_fpu) {
- .fcw = 0x37f,
- .mxcsr = 0x1f80,
+ .fcw = 0x37f,
+ .mxcsr = 0x1f80,
};
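/*
 * The values above are the architectural reset defaults: FCW 0x37f is the
 * x87 state after FINIT (all exceptions masked, extended precision), and
 * MXCSR 0x1f80 is the SSE power-on value with all exception bits masked.
 */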
if (ioctl(vcpu->vcpu_fd, KVM_SET_FPU, &vcpu->fpu) < 0)
{
vcpu->regs = (struct kvm_regs) {
/* We start the guest in 16-bit real mode */
- .rflags = 0x0000000000000002ULL,
+ .rflags = 0x0000000000000002ULL,
- .rip = vcpu->kvm->boot_ip,
- .rsp = vcpu->kvm->boot_sp,
- .rbp = vcpu->kvm->boot_sp,
+ .rip = vcpu->kvm->boot_ip,
+ .rsp = vcpu->kvm->boot_sp,
+ .rbp = vcpu->kvm->boot_sp,
};
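/*
 * Bit 1 of RFLAGS is reserved and always reads as one, so 0x2 is the minimal
 * legal flags value for entering the guest; rip/rsp come from the boot
 * protocol values filled in by the loader.
 */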
if (vcpu->regs.rip > USHRT_MAX)
- die("ip 0x%llx is too high for real mode", (u64) vcpu->regs.rip);
+ die("ip 0x%llx is too high for real mode", (u64)vcpu->regs.rip);
if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
die_perror("KVM_SET_REGS failed");
static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
-
if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_GET_SREGS failed");
dprintf(debug_fd, "\n");
}
-#define MAX_SYM_LEN 128
+#define MAX_SYM_LEN 128
void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
unsigned int code_bytes = 64;
- unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_prologue = 43;
unsigned int code_len = code_bytes;
char sym[MAX_SYM_LEN];
unsigned char c;
if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die("KVM_GET_SREGS failed");
- pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
+ pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
if (!host_ptr_in_ram(vcpu->kvm, pte4))
return;
- pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
+ pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
if (!host_ptr_in_ram(vcpu->kvm, pte3))
return;
- pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
+ pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
if (!host_ptr_in_ram(vcpu->kvm, pte2))
return;
- pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
+ pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
if (!host_ptr_in_ram(vcpu->kvm, pte1))
return;
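/*
 * Walk sketch: at each level the entry keeps the next table's physical
 * address in its high bits, so masking with ~0xfff strips the
 * present/write/accessed flag bits before translating the guest-physical
 * address to a host pointer for the next hop.
 */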
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
-#include <asm/unistd.h>
struct kvm_ext kvm_req_ext[] = {
{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
{
strcpy(cmdline, "noapic noacpi pci=conf1 reboot=k panic=1 i8042.direct=1 "
"i8042.dumbkbd=1 i8042.nopnp=1");
- if (video) {
+ if (video)
strcat(cmdline, " video=vesafb console=tty0");
- } else
+ else
strcat(cmdline, " console=ttyS0 earlyprintk=serial i8042.noaux=1");
}
/* This function wraps the decision between a hugetlbfs mapping (if requested) and a normal mmap */
static void *mmap_anon_or_hugetlbfs(const char *hugetlbfs_path, u64 size)
{
- if (hugetlbfs_path) {
+ if (hugetlbfs_path)
/*
* We don't /need/ to map guest RAM from hugetlbfs, but we do so
* if the user specifies a hugetlbfs path.
*/
return mmap_hugetlbfs(hugetlbfs_path, size);
- } else {
+ else
return mmap(NULL, size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
- }
}
/* Architecture-specific KVM init */
if (ret < 0)
die_perror("KVM_CREATE_PIT2 ioctl");
- kvm->ram_size = ram_size;
+ kvm->ram_size = ram_size;
if (kvm->ram_size < KVM_32BIT_GAP_START) {
kvm->ram_start = mmap_anon_or_hugetlbfs(hugetlbfs_path, ram_size);
} else {
kvm->ram_start = mmap_anon_or_hugetlbfs(hugetlbfs_path, ram_size + KVM_32BIT_GAP_SIZE);
- if (kvm->ram_start != MAP_FAILED) {
+ if (kvm->ram_start != MAP_FAILED)
/*
* We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
* if we accidentally write to it, we will know.
*/
mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
- }
}
if (kvm->ram_start == MAP_FAILED)
die("out of memory");
return true;
}
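/*
 * Layout sketch: guests whose RAM would cross KVM_32BIT_GAP_START have their
 * allocation grown by KVM_32BIT_GAP_SIZE so usable memory sits on both sides
 * of the 32-bit MMIO hole, and the hole itself is mapped PROT_NONE to turn
 * stray accesses into immediate faults.
 */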
-static const char *BZIMAGE_MAGIC = "HdrS";
+static const char *BZIMAGE_MAGIC = "HdrS";
bool load_bzimage(struct kvm *kvm, int fd_kernel,
int fd_initrd, const char *kernel_cmdline, u16 vidmode)
kern_boot->hdr.ramdisk_size = initrd_stat.st_size;
}
- kvm->boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->boot_selector = BOOT_LOADER_SELECTOR;
/*
* The real-mode setup code starts at offset 0x200 of a bzImage. See
* Documentation/x86/boot.txt for details.
*/
- kvm->boot_ip = BOOT_LOADER_IP + 0x200;
- kvm->boot_sp = BOOT_LOADER_SP;
+ kvm->boot_ip = BOOT_LOADER_IP + 0x200;
+ kvm->boot_sp = BOOT_LOADER_SP;
return true;
}
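/*
 * Worked example (using kvmtool's conventional values, BOOT_LOADER_SELECTOR
 * 0x1000 and BOOT_LOADER_IP 0): the first instruction fetch happens at
 * (0x1000 << 4) + 0x200 = 0x10200, the bzImage real-mode entry point.
 */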