kvm_rs/
lib.rs

1// SPDX-License-Identifier: MIT
2//
3// Copyright (c) 2021, Johannes Stoelp <dev@memzero.de>
4
5use std::convert::{AsMut, AsRef};
6use std::io;
7use std::ops;
8use std::os::unix::io::AsRawFd;
9
10pub mod cap;
11mod fmt;
12pub mod kvm;
13pub mod kvm_sys;
14pub mod vcpu;
15pub mod vm;
16pub mod x86_64;
17
/// Strong type representing physical addresses.
///
/// The newtype keeps guest physical addresses from being confused with plain
/// `u64` values (e.g. host addresses or lengths) at API boundaries.
/// It is a thin wrapper around a `u64`, hence it derives `Copy` and is passed
/// by value (see `UserMem::load`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PhysAddr(pub u64);
20
21/// Helper to turn libc return values into an [io::Result](std::io::Result). Returns
22/// [`Error::last_os_error`](std::io::Error::last_os_error) if `ret < 0`.
23fn libcret(ret: libc::c_int) -> io::Result<libc::c_int> {
24    if ret < 0 {
25        Err(io::Error::last_os_error())
26    } else {
27        Ok(ret)
28    }
29}
30
31/// Wrapper of `libc::ioctl` for KVM ioctls with one argument and returning an
32/// [`io::Result`](std::io::Result).
33fn ioctl<F: AsRawFd>(fd: &F, cmd: u64, arg: u64) -> io::Result<libc::c_int> {
34    libcret(unsafe { libc::ioctl(fd.as_raw_fd(), cmd, arg) })
35}
36
/// Wrapper to safely allocate memory for guest VMs.
///
/// The underlying memory is freed automatically once the `UserMem` instance is dropped.
///
/// Memory can be mapped into a guest VM with
/// [`Vm::set_user_memory_region`](crate::vm::Vm::set_user_memory_region).
pub struct UserMem {
    // Start of the anonymous mmap(2) mapping backing the guest memory
    // (see `UserMem::new`); unmapped in `Drop`.
    ptr: *mut u8,
    // Length of the mapping in bytes.
    len: usize,
}
47
48impl UserMem {
49    /// Allocate a zero-initialized memory region of `len` bytes.
50    pub fn new(len: usize) -> io::Result<UserMem> {
51        let ptr = unsafe {
52            libc::mmap(
53                std::ptr::null_mut(),
54                len,
55                libc::PROT_READ | libc::PROT_WRITE,
56                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
57                -1,
58                0,
59            )
60        };
61
62        if ptr == libc::MAP_FAILED {
63            Err(io::Error::last_os_error())
64        } else {
65            Ok(UserMem {
66                ptr: ptr.cast(),
67                len,
68            })
69        }
70    }
71
72    /// Allocate a zero-initialized memory region of `len` bytes and initialize the first bytes
73    /// with `init_from`.
74    ///
75    /// # Panics
76    ///
77    /// Panics if `init_from` is larger than the memory size `len`.
78    pub fn with_init(len: usize, init_from: &[u8]) -> io::Result<UserMem> {
79        assert!(len >= init_from.len());
80
81        let mut m = UserMem::new(len)?;
82        m.load(PhysAddr(0), init_from);
83        Ok(m)
84    }
85
86    /// Load the bytes stored in `data` into memory at physical address `addr`.
87    ///
88    /// # Panics
89    ///
90    /// Panics if `addr + data.len` is larger than the memory size `len`.
91    pub fn load(&mut self, addr: PhysAddr, data: &[u8]) {
92        assert!(self.len >= addr.0 as usize + data.len());
93
94        let addr = addr.0 as usize;
95        self.as_mut()[addr..addr + data.len()].copy_from_slice(data);
96    }
97}
98
99impl ops::Drop for UserMem {
100    /// Free underlying memory.
101    fn drop(&mut self) {
102        unsafe { libc::munmap(self.ptr.cast(), self.len) };
103    }
104}
105
106impl AsRef<[u8]> for UserMem {
107    fn as_ref(&self) -> &[u8] {
108        unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
109    }
110}
111
112impl AsMut<[u8]> for UserMem {
113    fn as_mut(&mut self) -> &mut [u8] {
114        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len) }
115    }
116}
117
/// Internal wrapper to automatically `mmap` and `munmap` the [`struct kvm_run`][kvm_run]
/// for a given VCPU.
///
/// [kvm_run]: https://www.kernel.org/doc/html/latest/virt/kvm/api.html#the-kvm-run-structure
struct KvmRun {
    // Host pointer to the shared `struct kvm_run` mapping created in `KvmRun::new`.
    ptr: *mut kvm_sys::kvm_run,
    // Length of the mapping in bytes (presumably the value reported by
    // KVM_GET_VCPU_MMAP_SIZE — confirm at the call site).
    len: usize,
}
126
127impl KvmRun {
128    /// Mmap the `struct kvm_run` for a given `VCPU` referenced by the argument file descriptor
129    /// `vcpu`.
130    fn new<F: AsRawFd>(vcpu: &F, len: usize) -> io::Result<KvmRun> {
131        let ptr = unsafe {
132            libc::mmap(
133                std::ptr::null_mut(),
134                len,
135                libc::PROT_READ | libc::PROT_WRITE,
136                libc::MAP_SHARED,
137                vcpu.as_raw_fd(),
138                0,
139            )
140        };
141
142        if ptr == libc::MAP_FAILED {
143            Err(io::Error::last_os_error())
144        } else {
145            Ok(KvmRun {
146                ptr: ptr.cast(),
147                len,
148            })
149        }
150    }
151}
152
153impl ops::Drop for KvmRun {
154    /// Munmap the mmaped `struct kvm_run`.
155    fn drop(&mut self) {
156        unsafe { libc::munmap(self.ptr.cast(), self.len) };
157    }
158}
159
160impl AsRef<kvm_sys::kvm_run> for KvmRun {
161    fn as_ref(&self) -> &kvm_sys::kvm_run {
162        unsafe { &*(self.ptr as *const kvm_sys::kvm_run) }
163    }
164}
165
166impl AsMut<kvm_sys::kvm_run> for KvmRun {
167    fn as_mut(&mut self) -> &mut kvm_sys::kvm_run {
168        unsafe { &mut *(self.ptr as *mut kvm_sys::kvm_run) }
169    }
170}