//! Lightweight, safe wrappers around the Linux KVM API (guest memory, VMs, vCPUs).
use std::convert::{AsMut, AsRef};
use std::io;
use std::ops;
use std::os::unix::io::AsRawFd;
pub mod cap;
mod fmt;
pub mod kvm;
pub mod kvm_sys;
pub mod vcpu;
pub mod vm;
pub mod x86_64;
/// Strong type representing physical addresses.
///
/// Derives the standard traits so addresses can be freely copied, compared,
/// hashed, and debug-printed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PhysAddr(pub u64);
/// Helper to turn libc return values into an [io::Result](std::io::Result). Returns
/// [`Error::last_os_error`](std::io::Error::last_os_error) if `ret < 0`.
fn libcret(ret: libc::c_int) -> io::Result<libc::c_int> {
    match ret {
        // Negative return values signal failure; errno carries the detail.
        r if r < 0 => Err(io::Error::last_os_error()),
        r => Ok(r),
    }
}
/// Wrapper of `libc::ioctl` for KVM ioctls with one argument and returning an
/// [`io::Result`](std::io::Result).
fn ioctl<F: AsRawFd>(fd: &F, cmd: u64, arg: u64) -> io::Result<libc::c_int> {
    // SAFETY: plain ioctl(2) syscall on a valid fd; the kernel validates `cmd`
    // and `arg`, and the caller is responsible for passing a matching pair.
    let ret = unsafe { libc::ioctl(fd.as_raw_fd(), cmd, arg) };
    libcret(ret)
}
/// Wrapper to safely allocate memory for guest VMs.
///
/// The underlying memory is freed automatically once the `UserMem` instance is dropped.
///
/// Memory can be mapped into a guest VM with
/// [`Vm::set_user_memory_region`](crate::vm::Vm::set_user_memory_region).
pub struct UserMem {
    // Start of the anonymous mmap(2) region owned by this instance.
    ptr: *mut u8,
    // Size of the mapping in bytes; passed to munmap(2) on drop.
    len: usize,
}
impl UserMem {
    /// Allocate a zero-initialized memory region of `len` bytes.
    ///
    /// The region is created with `mmap(MAP_PRIVATE | MAP_ANONYMOUS)`; anonymous
    /// mappings are zero-filled by the kernel.
    pub fn new(len: usize) -> io::Result<UserMem> {
        // SAFETY: anonymous private mapping with no fd; the kernel validates the
        // length and protection flags and reports failure via MAP_FAILED.
        let ptr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                len,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
        };
        if ptr == libc::MAP_FAILED {
            Err(io::Error::last_os_error())
        } else {
            Ok(UserMem {
                ptr: ptr.cast(),
                len,
            })
        }
    }

    /// Allocate a zero-initialized memory region of `len` bytes and initialize the first bytes
    /// with `init_from`.
    ///
    /// # Panics
    ///
    /// Panics if `init_from` is larger than the memory size `len`.
    pub fn with_init(len: usize, init_from: &[u8]) -> io::Result<UserMem> {
        assert!(len >= init_from.len());
        let mut m = UserMem::new(len)?;
        m.load(PhysAddr(0), init_from);
        Ok(m)
    }

    /// Load the bytes stored in `data` into memory at physical address `addr`.
    ///
    /// # Panics
    ///
    /// Panics if `addr + data.len` overflows or is larger than the memory size `len`.
    pub fn load(&mut self, addr: PhysAddr, data: &[u8]) {
        let addr = addr.0 as usize;
        // Use checked arithmetic: `addr + data.len()` would silently wrap in release
        // builds, defeating the bounds assertion below.
        let end = addr
            .checked_add(data.len())
            .expect("physical address range overflows usize");
        assert!(self.len >= end);
        self.as_mut()[addr..end].copy_from_slice(data);
    }
}
impl ops::Drop for UserMem {
    /// Free underlying memory.
    fn drop(&mut self) {
        // SAFETY: `ptr`/`len` describe the mapping created in `UserMem::new` and
        // are unmapped exactly once here; a munmap failure cannot be recovered
        // from in drop, so the return value is intentionally discarded.
        unsafe {
            libc::munmap(self.ptr as *mut libc::c_void, self.len);
        }
    }
}
impl AsRef<[u8]> for UserMem {
    /// Borrow the guest memory as an immutable byte slice.
    fn as_ref(&self) -> &[u8] {
        // SAFETY: `ptr` points to a live mapping of exactly `len` readable bytes
        // that outlives the returned borrow.
        unsafe { std::slice::from_raw_parts(self.ptr as *const u8, self.len) }
    }
}
impl AsMut<[u8]> for UserMem {
    /// Borrow the guest memory as a mutable byte slice.
    fn as_mut(&mut self) -> &mut [u8] {
        // SAFETY: `ptr` points to a live, writable mapping of exactly `len` bytes,
        // and the `&mut self` receiver guarantees exclusive access.
        unsafe { std::slice::from_raw_parts_mut(self.ptr.cast::<u8>(), self.len) }
    }
}
/// Internal wrapper to automatically `mmap` and `munmap` the [`struct kvm_run`][kvm_run]
/// for a given VCPU.
///
/// [kvm_run]: https://www.kernel.org/doc/html/latest/virt/kvm/api.html#the-kvm-run-structure
struct KvmRun {
    // Start of the shared mapping of the vCPU's `struct kvm_run`.
    ptr: *mut kvm_sys::kvm_run,
    // Size of the mapping in bytes; passed to munmap(2) on drop.
    len: usize,
}
impl KvmRun {
    /// Mmap the `struct kvm_run` for a given `VCPU` referenced by the argument file descriptor
    /// `vcpu`.
    fn new<F: AsRawFd>(vcpu: &F, len: usize) -> io::Result<KvmRun> {
        // SAFETY: shared mapping over a kernel-provided vCPU fd; the kernel
        // validates the fd, length, and flags and reports failure via MAP_FAILED.
        let raw = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                len,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED,
                vcpu.as_raw_fd(),
                0,
            )
        };
        if raw == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }
        Ok(KvmRun {
            ptr: raw.cast(),
            len,
        })
    }
}
impl ops::Drop for KvmRun {
    /// Munmap the mmaped `struct kvm_run`.
    fn drop(&mut self) {
        // SAFETY: `ptr`/`len` describe the mapping created in `KvmRun::new` and are
        // unmapped exactly once here; the munmap result is discarded as drop
        // cannot propagate errors.
        unsafe {
            libc::munmap(self.ptr as *mut libc::c_void, self.len);
        }
    }
}
impl AsRef<kvm_sys::kvm_run> for KvmRun {
    /// Borrow the mapped `struct kvm_run` immutably.
    fn as_ref(&self) -> &kvm_sys::kvm_run {
        // SAFETY: `ptr` is the non-null, properly aligned mapping established in
        // `KvmRun::new`, live for the lifetime of `self`. No cast needed: `ptr`
        // already has type `*mut kvm_sys::kvm_run`.
        unsafe { &*self.ptr }
    }
}
impl AsMut<kvm_sys::kvm_run> for KvmRun {
    /// Borrow the mapped `struct kvm_run` mutably.
    fn as_mut(&mut self) -> &mut kvm_sys::kvm_run {
        // SAFETY: `ptr` is the non-null, properly aligned mapping established in
        // `KvmRun::new`; the `&mut self` receiver guarantees exclusive access.
        unsafe { &mut *self.ptr }
    }
}