repo_name stringlengths 1 62 | dataset stringclasses 1 value | lang stringclasses 11 values | pr_id int64 1 20.1k | owner stringlengths 2 34 | reviewer stringlengths 2 39 | diff_hunk stringlengths 15 262k | code_review_comment stringlengths 1 99.6k |
|---|---|---|---|---|---|---|---|
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::cell::RefCell;
+
+use super::TaskState;
+use super::{Task, TaskError};
+use crate::error::SvsmError;
+use crate::locking::SpinLock;
+use alloc::boxed::Box;
+use alloc::rc::Rc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, LinkedList};
+
+pub type TaskPointer = Rc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ list_link: Link,
+ pub task: RefCell<Box<Task>>,
+}
+
+intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::<TaskListAdapter>::new(TaskListAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<Rc<TaskNode>> {
+ let mut cursor = self.list.as_ref().unwrap().front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+}
+
+pub static TASKLIST: SpinLock<TaskList> = SpinLock::new(TaskList::new());
+
+pub fn create_task(
+ entry: extern "C" fn(),
+ flags: u16,
+ affinity: Option<u32>,
+) -> Result<TaskPointer, SvsmError> {
+ let mut task = Task::create(entry, flags)?;
+ task.set_affinity(affinity);
+ let node = Rc::new(TaskNode {
+ list_link: Link::default(),
+ task: RefCell::new(task),
+ });
+ let node_ret = node.clone();
+ {
+ let mut tl = TASKLIST.lock();
+ tl.list().push_front(node);
+ }
+ Ok(node_ret) | I don't think we need to scope this, no? The lock will be dropped at the end of the function:
```rust
...
let node = ...;
TASKLIST.lock().push_front(node.clone());
Ok(node)
}
``` |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::cell::RefCell;
+
+use super::TaskState;
+use super::{Task, TaskError};
+use crate::error::SvsmError;
+use crate::locking::SpinLock;
+use alloc::boxed::Box;
+use alloc::rc::Rc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, LinkedList};
+
+pub type TaskPointer = Rc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ list_link: Link,
+ pub task: RefCell<Box<Task>>,
+}
+
+intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::<TaskListAdapter>::new(TaskListAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<Rc<TaskNode>> {
+ let mut cursor = self.list.as_ref().unwrap().front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+}
+
+pub static TASKLIST: SpinLock<TaskList> = SpinLock::new(TaskList::new());
+
+pub fn create_task(
+ entry: extern "C" fn(),
+ flags: u16,
+ affinity: Option<u32>,
+) -> Result<TaskPointer, SvsmError> {
+ let mut task = Task::create(entry, flags)?;
+ task.set_affinity(affinity);
+ let node = Rc::new(TaskNode {
+ list_link: Link::default(),
+ task: RefCell::new(task),
+ });
+ let node_ret = node.clone();
+ {
+ let mut tl = TASKLIST.lock();
+ tl.list().push_front(node);
+ }
+ Ok(node_ret)
+}
+
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn close_task(task: TaskPointer) -> Result<(), TaskError> { | I would add a comment here as requested by clippy. The caller must ensure that the provided pointer comes from the global TASKLIST structure. |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -8,25 +8,156 @@ extern crate alloc;
use core::cell::RefCell;
-use super::TaskState;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
use super::{Task, TaskError};
use crate::error::SvsmError;
use crate::locking::SpinLock;
use alloc::boxed::Box;
use alloc::rc::Rc;
use intrusive_collections::linked_list::Link;
-use intrusive_collections::{intrusive_adapter, LinkedList};
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
pub type TaskPointer = Rc<TaskNode>;
#[derive(Debug)]
pub struct TaskNode {
+ tree_link: RBTreeLink,
list_link: Link,
pub task: RefCell<Box<Task>>,
}
+intrusive_adapter!(pub TaskTreeAdapter = Rc<TaskNode>: TaskNode { tree_link: RBTreeLink });
intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct TaskScheduler {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+}
+
+impl TaskScheduler {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<Rc<TaskNode>> { | Returning an `Option<TaskPointer>` is probably more clear since we introduced that typedef. |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -8,25 +8,156 @@ extern crate alloc;
use core::cell::RefCell;
-use super::TaskState;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
use super::{Task, TaskError};
use crate::error::SvsmError;
use crate::locking::SpinLock;
use alloc::boxed::Box;
use alloc::rc::Rc;
use intrusive_collections::linked_list::Link;
-use intrusive_collections::{intrusive_adapter, LinkedList};
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
pub type TaskPointer = Rc<TaskNode>;
#[derive(Debug)]
pub struct TaskNode {
+ tree_link: RBTreeLink,
list_link: Link,
pub task: RefCell<Box<Task>>,
}
+intrusive_adapter!(pub TaskTreeAdapter = Rc<TaskNode>: TaskNode { tree_link: RBTreeLink });
intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct TaskScheduler {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+}
+
+impl TaskScheduler {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<Rc<TaskNode>> {
+ let mut cursor = self.tree.as_ref().unwrap().front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.borrow().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this scheduler instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let tree = &mut self.tree();
+ let cursor = tree.lower_bound(Bound::Included(&0)); | This does not need to be `&mut`, as `tree()` already returns a mutable reference. These two lines can be:
```rust
let cursor = self.tree().lower_bound(Bound::Included(&0));
``` |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -8,25 +8,156 @@ extern crate alloc;
use core::cell::RefCell;
-use super::TaskState;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
use super::{Task, TaskError};
use crate::error::SvsmError;
use crate::locking::SpinLock;
use alloc::boxed::Box;
use alloc::rc::Rc;
use intrusive_collections::linked_list::Link;
-use intrusive_collections::{intrusive_adapter, LinkedList};
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
pub type TaskPointer = Rc<TaskNode>;
#[derive(Debug)]
pub struct TaskNode {
+ tree_link: RBTreeLink,
list_link: Link,
pub task: RefCell<Box<Task>>,
}
+intrusive_adapter!(pub TaskTreeAdapter = Rc<TaskNode>: TaskNode { tree_link: RBTreeLink });
intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct TaskScheduler {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+}
+
+impl TaskScheduler {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<Rc<TaskNode>> {
+ let mut cursor = self.tree.as_ref().unwrap().front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.borrow().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this scheduler instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let tree = &mut self.tree();
+ let cursor = tree.lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the borrow_mut below.
+ let next_task_ptr = next_task_node.task.as_ptr();
+ let next_task_id = {
+ let mut next_task = next_task_node.task.borrow_mut();
+ next_task.state = TaskState::SCHEDULED;
+ next_task.runtime.schedule_in();
+ next_task.id
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.borrow_mut();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ // This function leaves the tree in an invalid state as it takes the current
+ // task, replacing it with None. The caller must assign a new task or reassign
+ // the current task to the tree before resuming.
+ let task_ptr = self.current_task.take();
+ if task_ptr.is_some() {
+ let mut task_cursor = unsafe {
+ self.tree()
+ .cursor_mut_from_ptr(task_ptr.as_ref().unwrap().as_ref())
+ }; | We can avoid the unwrap here, plus a level of indentation:
```rust
fn update_current_task(&mut self) -> Option<TaskPointer>{
...
let task_ptr = self.current_task.take()?;
let mut task_cursor = unsafe {
self.tree()
.cursor_mut_from_ptr(task_ptr.as_ref())
};
...
Some(task_ptr)
}
``` |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -8,25 +8,156 @@ extern crate alloc;
use core::cell::RefCell;
-use super::TaskState;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
use super::{Task, TaskError};
use crate::error::SvsmError;
use crate::locking::SpinLock;
use alloc::boxed::Box;
use alloc::rc::Rc;
use intrusive_collections::linked_list::Link;
-use intrusive_collections::{intrusive_adapter, LinkedList};
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
pub type TaskPointer = Rc<TaskNode>;
#[derive(Debug)]
pub struct TaskNode {
+ tree_link: RBTreeLink,
list_link: Link,
pub task: RefCell<Box<Task>>,
}
+intrusive_adapter!(pub TaskTreeAdapter = Rc<TaskNode>: TaskNode { tree_link: RBTreeLink });
intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct TaskScheduler {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+}
+
+impl TaskScheduler {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<Rc<TaskNode>> {
+ let mut cursor = self.tree.as_ref().unwrap().front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.borrow().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this scheduler instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let tree = &mut self.tree();
+ let cursor = tree.lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the borrow_mut below.
+ let next_task_ptr = next_task_node.task.as_ptr();
+ let next_task_id = {
+ let mut next_task = next_task_node.task.borrow_mut();
+ next_task.state = TaskState::SCHEDULED;
+ next_task.runtime.schedule_in();
+ next_task.id
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.borrow_mut();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ // This function leaves the tree in an invalid state as it takes the current
+ // task, replacing it with None. The caller must assign a new task or reassign
+ // the current task to the tree before resuming. | I think this is mostly fine since it is a private function, but perhaps we could annotate it with unsafe. |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -277,6 +277,33 @@ pub fn is_current_task(id: u32) -> bool {
}
}
+pub fn current_task_terminated() {
+ // Restrict the scope of the mutable borrow below otherwise when the task context
+ // is switched via schedule() the borrow remains in scope.
+ {
+ let this_task = this_cpu_mut()
+ .runqueue
+ .current_task
+ .as_mut()
+ .expect("Invalid state in task_exit()");
+ let mut current_task = this_task.task.borrow_mut();
+ current_task.state = TaskState::TERMINATED;
+ // Ensure the scheduler does not waste time encountering terminated tasks
+ // by setting a high runtime value
+ current_task.runtime.terminated();
+
+ // Remove it from the CPU scheduler
+ unsafe {
+ let mut cursor = this_cpu_mut()
+ .runqueue
+ .tree()
+ .cursor_mut_from_ptr(this_task.as_ref());
+ cursor.remove();
+ } | So I'm not sure I fully understand, but isn't this what `TaskScheduler::deallocate()` is already doing? Any reason we're not using that function?
Also, I don't see `close_task()` called anywhere, it seems to me that functionality should be done from `TaskScheduler::deallocate()`, which should take `&mut self` (at the moment it is an associated method, which does not make much sense). Otherwise the task will stay forever in the tasklist, right? |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -0,0 +1,75 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::cell::RefCell;
+
+use super::Task;
+use crate::error::SvsmError;
+use crate::locking::SpinLock;
+use alloc::boxed::Box;
+use alloc::rc::Rc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, LinkedList};
+
+pub type TaskPointer = Rc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ list_link: Link,
+ pub task: RefCell<Box<Task>>,
+}
+
+intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::<TaskListAdapter>::new(TaskListAdapter::new())) | Just a nitpick, but rustc can already figure out this generic, we can simply do:
```rust
.get_or_insert_with(|| LinkedList::new(TaskTreeAdapter::new()))
``` |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -9,22 +9,170 @@ extern crate alloc;
use core::cell::RefCell;
use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
use crate::error::SvsmError;
use crate::locking::SpinLock;
use alloc::boxed::Box;
use alloc::rc::Rc;
use intrusive_collections::linked_list::Link;
-use intrusive_collections::{intrusive_adapter, LinkedList};
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
pub type TaskPointer = Rc<TaskNode>;
#[derive(Debug)]
pub struct TaskNode {
+ tree_link: RBTreeLink,
list_link: Link,
pub task: RefCell<Box<Task>>,
}
-intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+}
+
+impl RunQueue {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new())) | Same here, the generic in RBTree can be omitted. |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -9,22 +9,170 @@ extern crate alloc;
use core::cell::RefCell;
use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
use crate::error::SvsmError;
use crate::locking::SpinLock;
use alloc::boxed::Box;
use alloc::rc::Rc;
use intrusive_collections::linked_list::Link;
-use intrusive_collections::{intrusive_adapter, LinkedList};
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
pub type TaskPointer = Rc<TaskNode>;
#[derive(Debug)]
pub struct TaskNode {
+ tree_link: RBTreeLink,
list_link: Link,
pub task: RefCell<Box<Task>>,
}
-intrusive_adapter!(pub TaskListAdapter = Rc<TaskNode>: TaskNode { list_link: Link });
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+}
+
+impl RunQueue {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.borrow().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the borrow_mut below.
+ let next_task_ptr = next_task_node.task.as_ptr();
+ let next_task_id = {
+ let mut next_task = next_task_node.task.borrow_mut();
+ next_task.runtime.schedule_in();
+ next_task.id
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.borrow_mut();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ let task_node = self.current_task.take()?;
+ task_node.task.borrow_mut().runtime.schedule_out();
+
+ // Reinsert the node into the tree so the position is updated with the new rutime
+ let mut task_cursor = unsafe { self.tree().cursor_mut_from_ptr(task_node.as_ref()) };
+ task_cursor.remove();
+ self.tree().insert(task_node.clone());
+ Some(task_node)
+ }
+
+ /// Helper function that determines if a task is a candidate for allocating
+ /// to a CPU
+ fn is_cpu_candidate(id: u32, t: &Task) -> bool {
+ (t.state == TaskState::RUNNING)
+ && t.allocation.is_none()
+ && t.affinity.map_or(true, |a| a == id)
+ }
+
+ /// Iterate through all unallocated tasks and find a suitable candidates
+ /// for allocating to this queue
+ pub fn allocate(&mut self, id: u32) {
+ let mut tl = TASKLIST.lock();
+ let lowest_runtime = if let Some(t) = self.tree().lower_bound(Bound::Included(&0)).get() {
+ t.task.borrow().runtime.value()
+ } else {
+ 0
+ };
+ let mut cursor = tl.list().cursor_mut();
+ while !cursor.peek_next().is_null() {
+ cursor.move_next();
+ // Filter on running, unallocated tasks that either have no affinity
+ // or have an affinity for this CPU ID
+ if cursor.get().is_some_and(|task_node| {
+ Self::is_cpu_candidate(id, task_node.task.borrow().as_ref())
+ }) {
+ {
+ // Restrict the scope of the mutable borrow because the insertion
+ // into the tree also tries to borrow the task.
+ let mut t = cursor.get().unwrap().task.borrow_mut();
+ t.allocation = Some(id);
+ t.runtime.set(lowest_runtime);
+ }
+ self.tree()
+ .insert(cursor.as_cursor().clone_pointer().unwrap());
+ } | Unless I'm missing something about the borrowing here, we can get rid of an unwrap by using `Option::filter()`:
```rust
if let Some(task_node) = cursor.get().filter(|task_node| Self::is_cpu_candidate(...)) {
{
let mut t = task_node.task.borrow_mut();
...
}
...
}
``` |
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -220,5 +221,25 @@ pub fn create_task(
task: RefCell::new(task),
});
TASKLIST.lock().list().push_front(node.clone());
+ // Allocate any unallocated tasks (including the newly created one)
+ // to the current CPU
+ this_cpu_mut().runqueue.allocate(this_cpu().get_apic_id()); | It's unfortunate that we grab the `TASKLIST` lock twice in a row (here and in `RunQueue::allocate()`. I don't think it will be too important for performance so perhaps this is just a nitpick, but I wonder if we could rework this to avoid it, as it seems a bit awkward. Maybe having `allocate()` take a mutable reference to the tasklist? I think we already had something like that in a previous version of the PR. It's not pretty, but it would make the code more explicit.
Also, we can avoid getting a reference to the PerCpu struct twice:
```rust
let this_cpu = this_cpu_mut();
let mut tl = TASKLIST.lock();
tl.push_front(node.clone());
this_cpu.runqueue.allocate(this_cpu.get_apic_id(), &mut tl);
```
|
svsm | github_2023 | others | 64 | coconut-svsm | 00xc | @@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::cell::RefCell;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::SpinLock;
+use alloc::boxed::Box;
+use alloc::rc::Rc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
+
+pub type TaskPointer = Rc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeLink,
+ list_link: Link,
+ pub task: RefCell<Box<Task>>,
+}
+
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.borrow().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+ terminated_task: Option<TaskPointer>,
+}
+
+impl RunQueue {
+ pub const fn new() -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ terminated_task: None,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::<TaskTreeAdapter>::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.borrow().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the borrow_mut below.
+ let next_task_ptr = next_task_node.task.as_ptr();
+ let next_task_id = {
+ let mut next_task = next_task_node.task.borrow_mut();
+ next_task.runtime.schedule_in();
+ next_task.id
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.borrow_mut();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ let task_node = self.current_task.take()?;
+ let task_state = {
+ let mut task = task_node.task.borrow_mut();
+ task.runtime.schedule_out();
+ task.state
+ };
+
+ if task_state == TaskState::TERMINATED {
+ // The current task has terminated. Make sure it doesn't get added back
+ // into the runtime tree, but also we need to make sure we keep a
+ // reference to the task because the current stack is owned by it.
+ // Put it in a holding location which will be cleared by the next
+ // active task.
+ unsafe {
+ self.deallocate(task_node.clone());
+ }
+ self.terminated_task = Some(task_node);
+ None
+ } else {
+ // Reinsert the node into the tree so the position is updated with the new rutime
+ let mut task_cursor = unsafe { self.tree().cursor_mut_from_ptr(task_node.as_ref()) };
+ task_cursor.remove();
+ self.tree().insert(task_node.clone());
+ Some(task_node)
+ }
+ }
+
+ /// Helper function that determines if a task is a candidate for allocating
+ /// to a CPU
+ fn is_cpu_candidate(id: u32, t: &Task) -> bool {
+ (t.state == TaskState::RUNNING)
+ && t.allocation.is_none()
+ && t.affinity.map_or(true, |a| a == id)
+ }
+
+ /// Iterate through all unallocated tasks and find a suitable candidates
+ /// for allocating to this queue
+ pub fn allocate(&mut self, id: u32) {
+ let mut tl = TASKLIST.lock();
+ let lowest_runtime = if let Some(t) = self.tree().lower_bound(Bound::Included(&0)).get() {
+ t.task.borrow().runtime.value()
+ } else {
+ 0
+ };
+ let mut cursor = tl.list().cursor_mut();
+ while !cursor.peek_next().is_null() {
+ cursor.move_next();
+ // Filter on running, unallocated tasks that either have no affinity
+ // or have an affinity for this CPU ID
+ if cursor.get().is_some_and(|task_node| {
+ Self::is_cpu_candidate(id, task_node.task.borrow().as_ref())
+ }) {
+ {
+ // Restrict the scope of the mutable borrow because the insertion
+ // into the tree also tries to borrow the task.
+ let mut t = cursor.get().unwrap().task.borrow_mut();
+ t.allocation = Some(id);
+ t.runtime.set(lowest_runtime);
+ }
+ self.tree()
+ .insert(cursor.as_cursor().clone_pointer().unwrap());
+ }
+ }
+ }
+
+ /// Deallocate a task from a per CPU runqueue but leave it in the global task list
+ /// where it can be reallocated if still in the RUNNING state.
+ ///
+ /// # Safety
+ /// This function is marked as unsafe as it will dereference an invalid pointer if
+ /// called with a task_node that is not contained within this queue.
+ pub unsafe fn deallocate(&mut self, task_node: TaskPointer) {
+ let mut cursor = self.tree().cursor_mut_from_ptr(task_node.as_ref());
+ cursor.remove();
+ task_node.task.borrow_mut().allocation = None;
+ }
+}
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::<TaskListAdapter>::new(TaskListAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_list) = &self.list {
+ let mut cursor = task_list.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.borrow().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+}
+
+pub static TASKLIST: SpinLock<TaskList> = SpinLock::new(TaskList::new());
+
+pub fn create_task(
+ entry: extern "C" fn(),
+ flags: u16,
+ affinity: Option<u32>,
+) -> Result<TaskPointer, SvsmError> {
+ let mut task = Task::create(entry, flags)?;
+ task.set_affinity(affinity);
+ let node = Rc::new(TaskNode {
+ tree_link: RBTreeLink::default(),
+ list_link: Link::default(),
+ task: RefCell::new(task),
+ });
+ TASKLIST.lock().list().push_front(node.clone());
+ // Allocate any unallocated tasks (including the newly created one)
+ // to the current CPU
+ this_cpu_mut().runqueue.allocate(this_cpu().get_apic_id());
+ schedule();
+
+ Ok(node)
+}
+
+/// Check to see if the task scheduled on the current processor has the given id
+pub fn is_current_task(id: u32) -> bool {
+ match &this_cpu().runqueue.current_task {
+ Some(current_task) => current_task.task.borrow().id == id,
+ None => id == INITIAL_TASK_ID,
+ }
+}
+
+pub unsafe fn current_task_terminated() {
+ let task_node = this_cpu_mut()
+ .runqueue
+ .current_task
+ .as_mut()
+ .expect("Task termination handler called when there is no current task");
+
+ // Set the task state as terminated. The task context is still current until
+ // schedule() is called which will then keep a reference until some time
+ // after the task has been switched.
+ task_node.task.borrow_mut().state = TaskState::TERMINATED;
+ let mut tl = TASKLIST.lock();
+ let mut cursor = unsafe { tl.list().cursor_mut_from_ptr(task_node.as_ref()) };
+ cursor.remove(); | Just for symmetry I would have this as a method on `TaskList`, just like we have `RunQueue::deallocate()`, so something like:
```rust
TASKLIST.lock().terminate(task_node);
``` |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::ops::DerefMut;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::{RWLock, SpinLock};
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
+
+pub type TaskPointer = Arc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeLink,
+ list_link: Link,
+ pub task: RWLock<Box<Task>>,
+}
+
+// SAFETY: Send + Sync is required for Arc<TaskNode> to implement Send. The `task`
+// member is Send + Sync but the intrusive_collection links are only Send. The only
+// access to these is via the intrusive_adapter! generated code which does not use
+// them concurrently across threads.
+unsafe impl Sync for TaskNode {}
+
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.lock_read().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+ terminated_task: Option<TaskPointer>,
+ id: u32,
+}
+
+impl RunQueue {
+ /// Create a new runqueue for an id. The id would normally be set
+ /// to the APIC ID of the CPU that owns the runqueue and is used to
+ /// determine the affinity of tasks
+ pub const fn new(id: u32) -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ terminated_task: None,
+ id,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.lock_read().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the lock_write below.
+ let (next_task_id, next_task_ptr) = {
+ let mut next_task = next_task_node.task.lock_write();
+ next_task.runtime.schedule_in();
+ (next_task.id, next_task.deref_mut() as *mut Box<Task>)
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.lock_write();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ let task_node = self.current_task.take()?;
+ let task_state = {
+ let mut task = task_node.task.lock_write();
+ task.runtime.schedule_out();
+ task.state
+ };
+
+ if task_state == TaskState::TERMINATED {
+ // The current task has terminated. Make sure it doesn't get added back
+ // into the runtime tree, but also we need to make sure we keep a
+ // reference to the task because the current stack is owned by it.
+ // Put it in a holding location which will be cleared by the next
+ // active task.
+ unsafe {
+ self.deallocate(task_node.clone());
+ }
+ self.terminated_task = Some(task_node);
+ None
+ } else {
+ // Reinsert the node into the tree so the position is updated with the new rutime | ```suggestion
// Reinsert the node into the tree so the position is updated with the new runtime
``` |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::ops::DerefMut;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::{RWLock, SpinLock};
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
+
+pub type TaskPointer = Arc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeLink,
+ list_link: Link,
+ pub task: RWLock<Box<Task>>,
+}
+
+// SAFETY: Send + Sync is required for Arc<TaskNode> to implement Send. The `task`
+// member is Send + Sync but the intrusive_collection links are only Send. The only
+// access to these is via the intrusive_adapter! generated code which does not use
+// them concurrently across threads.
+unsafe impl Sync for TaskNode {} | https://github.com/Amanieu/intrusive-rs/issues/47 seems to suggest the the default link types from the `intrusive-collections` crate are not meant to be used in multi-thread code. It specifically calls out that two threads could end up inserting the same node into two trees.
AFAICT `RunQueue::schedule` is written in such a way that two threads inserting at the same time should be possible: There are some checks that ensure that the thread is not already part of a run queue, but this function suffers from a race condition where the lock is briefly dropped after the check and only after a write-lock is acquired is the allocation commited to the task.
Have you considered using the atomic [link](https://docs.rs/intrusive-collections/latest/intrusive_collections/rbtree/struct.AtomicLink.html) [types](https://docs.rs/intrusive-collections/latest/intrusive_collections/rbtree/struct.AtomicLink.html)? |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::ops::DerefMut;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::{RWLock, SpinLock};
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
+
+pub type TaskPointer = Arc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeLink,
+ list_link: Link,
+ pub task: RWLock<Box<Task>>,
+}
+
+// SAFETY: Send + Sync is required for Arc<TaskNode> to implement Send. The `task`
+// member is Send + Sync but the intrusive_collection links are only Send. The only
+// access to these is via the intrusive_adapter! generated code which does not use
+// them concurrently across threads.
+unsafe impl Sync for TaskNode {}
+
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.lock_read().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+ terminated_task: Option<TaskPointer>,
+ id: u32,
+}
+
+impl RunQueue {
+ /// Create a new runqueue for an id. The id would normally be set
+ /// to the APIC ID of the CPU that owns the runqueue and is used to
+ /// determine the affinity of tasks
+ pub const fn new(id: u32) -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ terminated_task: None,
+ id,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.lock_read().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Included(&0)); | Using `Bound::Unbounded` here (and in a couple other places) is slightly more efficient because the the `RBTree` won't have to get the key while traversing the nodes. |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::ops::DerefMut;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::{RWLock, SpinLock};
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
+
+pub type TaskPointer = Arc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeLink,
+ list_link: Link,
+ pub task: RWLock<Box<Task>>,
+}
+
+// SAFETY: Send + Sync is required for Arc<TaskNode> to implement Send. The `task`
+// member is Send + Sync but the intrusive_collection links are only Send. The only
+// access to these is via the intrusive_adapter! generated code which does not use
+// them concurrently across threads.
+unsafe impl Sync for TaskNode {}
+
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.lock_read().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+ terminated_task: Option<TaskPointer>,
+ id: u32,
+}
+
+impl RunQueue {
+ /// Create a new runqueue for an id. The id would normally be set
+ /// to the APIC ID of the CPU that owns the runqueue and is used to
+ /// determine the affinity of tasks
+ pub const fn new(id: u32) -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ terminated_task: None,
+ id,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.lock_read().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the lock_write below.
+ let (next_task_id, next_task_ptr) = {
+ let mut next_task = next_task_node.task.lock_write();
+ next_task.runtime.schedule_in();
+ (next_task.id, next_task.deref_mut() as *mut Box<Task>)
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.lock_write();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ let task_node = self.current_task.take()?;
+ let task_state = {
+ let mut task = task_node.task.lock_write();
+ task.runtime.schedule_out();
+ task.state
+ };
+
+ if task_state == TaskState::TERMINATED {
+ // The current task has terminated. Make sure it doesn't get added back
+ // into the runtime tree, but also we need to make sure we keep a
+ // reference to the task because the current stack is owned by it.
+ // Put it in a holding location which will be cleared by the next
+ // active task.
+ unsafe {
+ self.deallocate(task_node.clone());
+ }
+ self.terminated_task = Some(task_node);
+ None
+ } else {
+ // Reinsert the node into the tree so the position is updated with the new rutime
+ let mut task_cursor = unsafe { self.tree().cursor_mut_from_ptr(task_node.as_ref()) };
+ task_cursor.remove();
+ self.tree().insert(task_node.clone());
+ Some(task_node)
+ }
+ }
+
+ /// Helper function that determines if a task is a candidate for allocating
+ /// to a CPU
+ fn is_cpu_candidate(&self, t: &Task) -> bool {
+ (t.state == TaskState::RUNNING)
+ && t.allocation.is_none()
+ && t.affinity.map_or(true, |a| a == self.id)
+ }
+
+ /// Iterate through all unallocated tasks and find a suitable candidates
+ /// for allocating to this queue
+ pub fn allocate(&mut self) {
+ let mut tl = TASKLIST.lock();
+ let lowest_runtime = if let Some(t) = self.tree().lower_bound(Bound::Included(&0)).get() {
+ t.task.lock_read().runtime.value()
+ } else {
+ 0
+ };
+ let mut cursor = tl.list().cursor_mut();
+ while !cursor.peek_next().is_null() {
+ cursor.move_next();
+ // Filter on running, unallocated tasks that either have no affinity
+ // or have an affinity for this CPU ID
+ if let Some(task_node) = cursor
+ .get()
+ .filter(|task_node| self.is_cpu_candidate(task_node.task.lock_read().as_ref()))
+ {
+ {
+ let mut t = task_node.task.lock_write();
+ t.allocation = Some(self.id);
+ t.runtime.set(lowest_runtime);
+ }
+ self.tree()
+ .insert(cursor.as_cursor().clone_pointer().unwrap());
+ }
+ }
+ }
+
+ /// Deallocate a task from a per CPU runqueue but leave it in the global task list
+ /// where it can be reallocated if still in the RUNNING state.
+ ///
+ /// # Safety
+ /// This function is marked as unsafe as it will dereference an invalid pointer if
+ /// called with a task_node that is not contained within this queue.
+ pub unsafe fn deallocate(&mut self, task_node: TaskPointer) {
+ let mut cursor = self.tree().cursor_mut_from_ptr(task_node.as_ref());
+ cursor.remove();
+ task_node.task.lock_write().allocation = None;
+ }
+}
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::new(TaskListAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_list) = &self.list {
+ let mut cursor = task_list.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None | ```suggestion
let task_list = &self.list.as_ref()?;
let mut cursor = task_list.front();
while let Some(task_node) = cursor.get() {
if task_node.task.lock_read().id == id {
return cursor.clone_pointer();
}
cursor.move_next();
}
None
``` |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::ops::DerefMut;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::{RWLock, SpinLock};
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use intrusive_collections::linked_list::Link;
+use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, LinkedList, RBTree, RBTreeLink};
+
+pub type TaskPointer = Arc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeLink,
+ list_link: Link,
+ pub task: RWLock<Box<Task>>,
+}
+
+// SAFETY: Send + Sync is required for Arc<TaskNode> to implement Send. The `task`
+// member is Send + Sync but the intrusive_collection links are only Send. The only
+// access to these is via the intrusive_adapter! generated code which does not use
+// them concurrently across threads.
+unsafe impl Sync for TaskNode {}
+
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: Link });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.lock_read().runtime.value()
+ }
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+ terminated_task: Option<TaskPointer>,
+ id: u32,
+}
+
+impl RunQueue {
+ /// Create a new runqueue for an id. The id would normally be set
+ /// to the APIC ID of the CPU that owns the runqueue and is used to
+ /// determine the affinity of tasks
+ pub const fn new(id: u32) -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ terminated_task: None,
+ id,
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.lock_read().id)
+ }
+
+ // Determine the next task to run on the vCPU that owns this instance.
+ // Returns a tuple that contains the next task and the previous task. If both
+ // are null then the existing task remains in scope.
+ //
+ // Note that this function does not actually perform the task switch. This is
+ // because it holds a mutable reference to self that must be released before
+ // the task switch occurs. Call this function from a global function that releases
+ // the reference before performing the task switch.
+ pub fn schedule(&mut self) -> (Option<*mut Box<Task>>, *mut Task) {
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let current_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Included(&0));
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Update the task we are switching to. Note that the next task may be
+ // the same as the current task so ensure we don't mutably borrow it twice
+ // by restricting the scope of the lock_write below.
+ let (next_task_id, next_task_ptr) = {
+ let mut next_task = next_task_node.task.lock_write();
+ next_task.runtime.schedule_in();
+ (next_task.id, next_task.deref_mut() as *mut Box<Task>)
+ };
+
+ let mut task_switch = true;
+ let current_task_ptr = current_task_node.map_or(core::ptr::null_mut(), |t| {
+ let mut current_task = t.task.lock_write();
+ task_switch = next_task_id != current_task.id;
+ current_task.as_mut() as *mut Task
+ });
+ if !task_switch {
+ (None, core::ptr::null_mut())
+ } else {
+ (Some(next_task_ptr), current_task_ptr)
+ }
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ let task_node = self.current_task.take()?;
+ let task_state = {
+ let mut task = task_node.task.lock_write();
+ task.runtime.schedule_out();
+ task.state
+ };
+
+ if task_state == TaskState::TERMINATED {
+ // The current task has terminated. Make sure it doesn't get added back
+ // into the runtime tree, but also we need to make sure we keep a
+ // reference to the task because the current stack is owned by it.
+ // Put it in a holding location which will be cleared by the next
+ // active task.
+ unsafe {
+ self.deallocate(task_node.clone());
+ }
+ self.terminated_task = Some(task_node);
+ None
+ } else {
+ // Reinsert the node into the tree so the position is updated with the new rutime
+ let mut task_cursor = unsafe { self.tree().cursor_mut_from_ptr(task_node.as_ref()) };
+ task_cursor.remove();
+ self.tree().insert(task_node.clone());
+ Some(task_node)
+ }
+ }
+
+ /// Helper function that determines if a task is a candidate for allocating
+ /// to a CPU
+ fn is_cpu_candidate(&self, t: &Task) -> bool {
+ (t.state == TaskState::RUNNING)
+ && t.allocation.is_none()
+ && t.affinity.map_or(true, |a| a == self.id)
+ }
+
+ /// Iterate through all unallocated tasks and find a suitable candidates
+ /// for allocating to this queue
+ pub fn allocate(&mut self) {
+ let mut tl = TASKLIST.lock();
+ let lowest_runtime = if let Some(t) = self.tree().lower_bound(Bound::Included(&0)).get() {
+ t.task.lock_read().runtime.value()
+ } else {
+ 0
+ };
+ let mut cursor = tl.list().cursor_mut();
+ while !cursor.peek_next().is_null() {
+ cursor.move_next();
+ // Filter on running, unallocated tasks that either have no affinity
+ // or have an affinity for this CPU ID
+ if let Some(task_node) = cursor
+ .get()
+ .filter(|task_node| self.is_cpu_candidate(task_node.task.lock_read().as_ref()))
+ {
+ {
+ let mut t = task_node.task.lock_write();
+ t.allocation = Some(self.id);
+ t.runtime.set(lowest_runtime);
+ }
+ self.tree()
+ .insert(cursor.as_cursor().clone_pointer().unwrap());
+ }
+ }
+ }
+
+ /// Deallocate a task from a per CPU runqueue but leave it in the global task list
+ /// where it can be reallocated if still in the RUNNING state.
+ ///
+ /// # Safety
+ /// This function is marked as unsafe as it will dereference an invalid pointer if
+ /// called with a task_node that is not contained within this queue.
+ pub unsafe fn deallocate(&mut self, task_node: TaskPointer) {
+ let mut cursor = self.tree().cursor_mut_from_ptr(task_node.as_ref());
+ cursor.remove();
+ task_node.task.lock_write().allocation = None;
+ }
+}
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::new(TaskListAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_list) = &self.list {
+ let mut cursor = task_list.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn terminate(&mut self, task_node: TaskPointer) {
+ // Set the task state as terminated. If the task being terminated is the
+ // current task then the task context will still need to be in scope until
+ // the next schedule() has completed. Schedule will keep a reference to this
+ // task until some time after the context switch.
+ task_node.task.lock_write().state = TaskState::TERMINATED;
+ let mut cursor = unsafe { self.list().cursor_mut_from_ptr(task_node.as_ref()) };
+ cursor.remove();
+ }
+}
+
+pub static TASKLIST: SpinLock<TaskList> = SpinLock::new(TaskList::new());
+
+pub fn create_task(
+ entry: extern "C" fn(),
+ flags: u16,
+ affinity: Option<u32>,
+) -> Result<TaskPointer, SvsmError> {
+ let mut task = Task::create(entry, flags)?;
+ task.set_affinity(affinity);
+ let node = Arc::new(TaskNode {
+ tree_link: RBTreeLink::default(),
+ list_link: Link::default(),
+ task: RWLock::new(task),
+ });
+ {
+ // Ensure the tasklist lock is released before schedule() is called
+ // otherwise the lock will be held when switching to a new context
+ let mut tl = TASKLIST.lock();
+ tl.list().push_front(node.clone());
+ }
+ schedule();
+
+ Ok(node)
+}
+
+/// Check to see if the task scheduled on the current processor has the given id
+pub fn is_current_task(id: u32) -> bool {
+ match &this_cpu().runqueue().lock_read().current_task {
+ Some(current_task) => current_task.task.lock_read().id == id,
+ None => id == INITIAL_TASK_ID,
+ }
+}
+
+pub unsafe fn current_task_terminated() {
+ let mut rq = this_cpu().runqueue().lock_write();
+ let task_node = rq
+ .current_task
+ .as_mut()
+ .expect("Task termination handler called when there is no current task");
+ TASKLIST.lock().terminate(task_node.clone());
+}
+
+pub fn schedule() {
+ this_cpu_mut().allocate_tasks();
+
+ let (next_task, current_task) = this_cpu().runqueue().lock_write().schedule();
+ if let Some(next_task) = next_task {
+ unsafe { (*next_task).set_current(current_task) }; | How is it guaranteed the `next_task` is not aliased by another vCPU taking the task's lock? |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -191,19 +215,22 @@ pub struct PerCpu {
pub vrange_4k: VirtualRange,
/// Address allocator for per-cpu 2m temporary mappings
pub vrange_2m: VirtualRange,
+
+ /// Task list that has been assigned for scheduling on this CPU
+ runqueue: RWLock<RunQueue>,
}
impl Default for PerCpu {
fn default() -> Self {
- Self::new()
+ Self::new(0) | This `Default` implementation is unused. Have you considered just removing it? |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -156,4 +156,32 @@ impl<T: Debug> RWLock<T> {
data: unsafe { &mut *self.data.get() },
}
}
+
+ pub fn lock_write_direct(&self) -> *mut T {
+ // Waiting for current writer to finish
+ loop {
+ let val = self.wait_for_writers();
+ let (readers, _) = split_val(val);
+ let new_val = compose_val(readers, 1);
+
+ if self
+ .rwlock
+ .compare_exchange(val, new_val, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ {
+ break;
+ }
+ core::hint::spin_loop();
+ }
+
+ // Now locked for write - wait until all readers finished
+ let val: u64 = self.wait_for_readers();
+ assert!(val == compose_val(0, 1));
+ unsafe { &mut *self.data.get() }
+ }
+
+ pub fn unlock_write_direct(&self) { | This function should be unsafe. |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -156,4 +156,32 @@ impl<T: Debug> RWLock<T> {
data: unsafe { &mut *self.data.get() },
}
}
+
+ pub fn lock_write_direct(&self) -> *mut T {
+ // Waiting for current writer to finish
+ loop {
+ let val = self.wait_for_writers();
+ let (readers, _) = split_val(val);
+ let new_val = compose_val(readers, 1);
+
+ if self
+ .rwlock
+ .compare_exchange(val, new_val, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ {
+ break;
+ }
+ core::hint::spin_loop();
+ }
+
+ // Now locked for write - wait until all readers finished
+ let val: u64 = self.wait_for_readers();
+ assert!(val == compose_val(0, 1));
+ unsafe { &mut *self.data.get() } | ```suggestion
self.data.get()
``` |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -156,4 +156,32 @@ impl<T: Debug> RWLock<T> {
data: unsafe { &mut *self.data.get() },
}
}
+
+ pub fn lock_write_direct(&self) -> *mut T { | This function could be implemented in terms of `lock_write`:
```rust
let guard = self.lock_write();
core::mem::forget(guard); // Leak the guard.
self.data.get()
``` |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -315,19 +296,32 @@ impl Task {
}
extern "C" fn task_exit() {
- panic!("Current task has exited");
+ unsafe {
+ current_task_terminated();
+ }
+ schedule();
}
#[allow(unused)]
#[no_mangle]
extern "C" fn apply_new_context(new_task: *mut Task) -> u64 {
unsafe {
let mut pt = (*new_task).page_table.lock();
- pt.copy_entry(&this_cpu().get_pgtable(), PGTABLE_LVL3_IDX_PERCPU);
+ this_cpu().populate_page_table(&mut pt);
pt.cr3_value().bits() as u64
}
}
+#[allow(unused)]
+#[no_mangle]
+extern "C" fn on_switch(new_task: *mut Task) { | ```suggestion
extern "C" fn on_switch(new_task: &mut Task) {
```
That way no unsafe is required inside the function. |
svsm | github_2023 | others | 64 | coconut-svsm | Freax13 | @@ -0,0 +1,372 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::ptr::null_mut;
+
+use super::Task;
+use super::{tasks::TaskRuntime, TaskState, INITIAL_TASK_ID};
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
+use crate::error::SvsmError;
+use crate::locking::{RWLock, SpinLock};
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use intrusive_collections::{
+ intrusive_adapter, Bound, KeyAdapter, LinkedList, LinkedListAtomicLink, RBTree,
+ RBTreeAtomicLink,
+};
+
+pub type TaskPointer = Arc<TaskNode>;
+
+#[derive(Debug)]
+pub struct TaskNode {
+ tree_link: RBTreeAtomicLink,
+ list_link: LinkedListAtomicLink,
+ pub task: RWLock<Box<Task>>,
+}
+
+// SAFETY: Send + Sync is required for Arc<TaskNode> to implement Send. The `task`
+// member is Send + Sync but the intrusive_collection links are only Send. The only
+// access to these is via the intrusive_adapter! generated code which does not use
+// them concurrently across threads.
+unsafe impl Sync for TaskNode {}
+
+intrusive_adapter!(pub TaskTreeAdapter = TaskPointer: TaskNode { tree_link: RBTreeAtomicLink });
+intrusive_adapter!(pub TaskListAdapter = TaskPointer: TaskNode { list_link: LinkedListAtomicLink });
+
+impl<'a> KeyAdapter<'a> for TaskTreeAdapter {
+ type Key = u64;
+ fn get_key(&self, node: &'a TaskNode) -> u64 {
+ node.task.lock_read().runtime.value()
+ }
+}
+
+#[derive(Debug)]
+struct TaskSwitch {
+ previous_task: Option<TaskPointer>,
+ next_task: Option<TaskPointer>,
+}
+
+/// Task priority tree
+/// This contains a set of tasks that are ready to run sorted in
+/// order of priority.
+#[derive(Debug)]
+pub struct RunQueue {
+ tree: Option<RBTree<TaskTreeAdapter>>,
+ current_task: Option<TaskPointer>,
+ terminated_task: Option<TaskPointer>,
+ id: u32,
+ task_switch: TaskSwitch,
+}
+
+impl RunQueue {
+ /// Create a new runqueue for an id. The id would normally be set
+ /// to the APIC ID of the CPU that owns the runqueue and is used to
+ /// determine the affinity of tasks
+ pub const fn new(id: u32) -> Self {
+ Self {
+ tree: None,
+ current_task: None,
+ terminated_task: None,
+ id,
+ task_switch: TaskSwitch {
+ previous_task: None,
+ next_task: None,
+ },
+ }
+ }
+
+ pub fn tree(&mut self) -> &mut RBTree<TaskTreeAdapter> {
+ self.tree
+ .get_or_insert_with(|| RBTree::new(TaskTreeAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ if let Some(task_tree) = &self.tree {
+ let mut cursor = task_tree.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ }
+ None
+ }
+
+ pub fn current_task_id(&self) -> u32 {
+ self.current_task
+ .as_ref()
+ .map_or(INITIAL_TASK_ID, |t| t.task.lock_read().id)
+ }
+
+ /// Determine the next task to run on the vCPU that owns this instance.
+ /// Populates self.task_switchwith the next task and the previous task. If both
+ /// are None then the existing task remains in scope.
+ ///
+ /// Note that this function does not actually perform the task switch. This is
+ /// because it holds a mutable reference to self that must be released before
+ /// the task switch occurs. Call this function from a global function that releases
+ /// the reference before performing the task switch.
+ ///
+ /// # Returns
+ ///
+ /// Pointers to the next task and the previous task.
+ ///
+ /// If the next task pointer is null_mut() then no task switch is required and the
+ /// caller must release the runqueue lock.
+ ///
+ /// If the next task pointer is not null_mut() then the caller must call
+ /// next_task->set_current(prev_task) with the runqueue lock still held.
+ pub fn schedule(&mut self) -> (*mut Task, *mut Task) {
+ self.task_switch.previous_task = None;
+ self.task_switch.next_task = None;
+
+ // Update the state of the current task. This will change the runtime value which
+ // is used as a key in the RB tree therefore we need to remove and reinsert the
+ // task.
+ let prev_task_node = self.update_current_task();
+
+ // Find the task with the lowest runtime. The tree only contains running tasks that
+ // are to be scheduled on this vCPU.
+ let cursor = self.tree().lower_bound(Bound::Unbounded);
+
+ // The cursor will now be on the next task to schedule. There should always be
+ // a candidate task unless the current cpu task terminated. For now, don't support
+ // termination of the initial thread which means there will always be a task to schedule
+ let next_task_node = cursor.clone_pointer().expect("No task to schedule on CPU");
+ self.current_task = Some(next_task_node.clone());
+
+ // Lock the current and next tasks and keep track of the lock state by adding references
+ // into the structure itself. This allows us to retain the lock over the context switch
+ // and unlock the tasks before returning to the new context.
+ let prev_task_ptr = if let Some(prev_task_node) = prev_task_node {
+ // If the next task is the same as the current one then we have nothing to do.
+ if prev_task_node.task.lock_read().id == next_task_node.task.lock_read().id {
+ return (null_mut(), null_mut());
+ }
+ self.task_switch.previous_task = Some(prev_task_node.clone());
+ unsafe { (*prev_task_node.task.lock_write_direct()).as_mut() }
+ } else {
+ null_mut()
+ };
+ self.task_switch.next_task = Some(next_task_node.clone());
+ let next_task_ptr = unsafe { (*next_task_node.task.lock_write_direct()).as_mut() };
+
+ (next_task_ptr, prev_task_ptr)
+ }
+
+ fn update_current_task(&mut self) -> Option<TaskPointer> {
+ let task_node = self.current_task.take()?;
+ let task_state = {
+ let mut task = task_node.task.lock_write();
+ task.runtime.schedule_out();
+ task.state
+ };
+
+ if task_state == TaskState::TERMINATED {
+ // The current task has terminated. Make sure it doesn't get added back
+ // into the runtime tree, but also we need to make sure we keep a
+ // reference to the task because the current stack is owned by it.
+ // Put it in a holding location which will be cleared by the next
+ // active task.
+ unsafe {
+ self.deallocate(task_node.clone());
+ }
+ self.terminated_task = Some(task_node);
+ None
+ } else {
+ // Reinsert the node into the tree so the position is updated with the new runtime
+ let mut task_cursor = unsafe { self.tree().cursor_mut_from_ptr(task_node.as_ref()) };
+ task_cursor.remove();
+ self.tree().insert(task_node.clone());
+ Some(task_node)
+ }
+ }
+
+ /// Helper function that determines if a task is a candidate for allocating
+ /// to a CPU
+ fn is_cpu_candidate(&self, t: &Task) -> bool {
+ (t.state == TaskState::RUNNING)
+ && t.allocation.is_none()
+ && t.affinity.map_or(true, |a| a == self.id)
+ }
+
+ /// Iterate through all unallocated tasks and find a suitable candidates
+ /// for allocating to this queue
+ pub fn allocate(&mut self) {
+ let mut tl = TASKLIST.lock();
+ let lowest_runtime = if let Some(t) = self.tree().lower_bound(Bound::Unbounded).get() {
+ t.task.lock_read().runtime.value()
+ } else {
+ 0
+ };
+ let mut cursor = tl.list().cursor_mut();
+ while !cursor.peek_next().is_null() {
+ cursor.move_next();
+ // Filter on running, unallocated tasks that either have no affinity
+ // or have an affinity for this CPU ID
+ if let Some(task_node) = cursor
+ .get()
+ .filter(|task_node| self.is_cpu_candidate(task_node.task.lock_read().as_ref()))
+ {
+ {
+ let mut t = task_node.task.lock_write();
+ // Now we have the lock, check again that the task has not been allocated
+ // to another runqueue between the filter above and us taking the lock.
+ if t.allocation.is_some() {
+ continue;
+ }
+ t.allocation = Some(self.id);
+ t.runtime.set(lowest_runtime);
+ }
+ self.tree()
+ .insert(cursor.as_cursor().clone_pointer().unwrap());
+ }
+ }
+ }
+
+ /// Release the spinlock on the previous and next tasks following a task switch
+ fn unlock_tasks(&mut self) {
+ if let Some(previous_task) = self.task_switch.previous_task.as_ref() {
+ previous_task.task.unlock_write_direct();
+ self.task_switch.previous_task = None;
+ }
+ if let Some(next_task) = self.task_switch.next_task.as_ref() {
+ next_task.task.unlock_write_direct();
+ self.task_switch.next_task = None;
+ }
+ }
+
+ /// Deallocate a task from a per CPU runqueue but leave it in the global task list
+ /// where it can be reallocated if still in the RUNNING state.
+ ///
+ /// # Safety
+ /// This function is marked as unsafe as it will dereference an invalid pointer if
+ /// called with a task_node that is not contained within this queue.
+ pub unsafe fn deallocate(&mut self, task_node: TaskPointer) {
+ let mut cursor = self.tree().cursor_mut_from_ptr(task_node.as_ref());
+ cursor.remove();
+ task_node.task.lock_write().allocation = None;
+ }
+}
+
+/// Global task list
+/// This contains every task regardless of affinity or run state.
+#[derive(Debug)]
+pub struct TaskList {
+ list: Option<LinkedList<TaskListAdapter>>,
+}
+
+impl TaskList {
+ pub const fn new() -> Self {
+ Self { list: None }
+ }
+
+ pub fn list(&mut self) -> &mut LinkedList<TaskListAdapter> {
+ self.list
+ .get_or_insert_with(|| LinkedList::new(TaskListAdapter::new()))
+ }
+
+ pub fn get_task(&self, id: u32) -> Option<TaskPointer> {
+ let task_list = &self.list.as_ref()?;
+ let mut cursor = task_list.front();
+ while let Some(task_node) = cursor.get() {
+ if task_node.task.lock_read().id == id {
+ return cursor.clone_pointer();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub fn terminate(&mut self, task_node: TaskPointer) {
+ // Set the task state as terminated. If the task being terminated is the
+ // current task then the task context will still need to be in scope until
+ // the next schedule() has completed. Schedule will keep a reference to this
+ // task until some time after the context switch.
+ task_node.task.lock_write().state = TaskState::TERMINATED;
+ let mut cursor = unsafe { self.list().cursor_mut_from_ptr(task_node.as_ref()) };
+ cursor.remove();
+ }
+}
+
+pub static TASKLIST: SpinLock<TaskList> = SpinLock::new(TaskList::new());
+
+fn task_switch_hook(_: &Task) {
+ // The task switch is called with the runqueue lock held. Unlock it.
+ this_cpu_mut().runqueue().unlock_write_direct();
+
+ // Then unlock the spinlocks that protect the previous and new tasks.
+ this_cpu_mut().runqueue().lock_write().unlock_tasks();
+}
+
+pub fn create_task(
+ entry: extern "C" fn(),
+ flags: u16,
+ affinity: Option<u32>,
+) -> Result<TaskPointer, SvsmError> {
+ let mut task = Task::create(entry, flags)?;
+ task.set_affinity(affinity);
+ task.set_on_switch_hook(Some(task_switch_hook));
+ let node = Arc::new(TaskNode {
+ tree_link: RBTreeAtomicLink::default(),
+ list_link: LinkedListAtomicLink::default(),
+ task: RWLock::new(task),
+ });
+ {
+ // Ensure the tasklist lock is released before schedule() is called
+ // otherwise the lock will be held when switching to a new context
+ let mut tl = TASKLIST.lock();
+ tl.list().push_front(node.clone());
+ }
+ schedule();
+
+ Ok(node)
+}
+
+/// Check to see if the task scheduled on the current processor has the given id
+pub fn is_current_task(id: u32) -> bool {
+ match &this_cpu().runqueue().lock_read().current_task {
+ Some(current_task) => current_task.task.lock_read().id == id,
+ None => id == INITIAL_TASK_ID,
+ }
+}
+
+pub unsafe fn current_task_terminated() {
+ let mut rq = this_cpu().runqueue().lock_write();
+ let task_node = rq
+ .current_task
+ .as_mut()
+ .expect("Task termination handler called when there is no current task");
+ TASKLIST.lock().terminate(task_node.clone());
+}
+
+pub fn schedule() {
+ this_cpu_mut().allocate_tasks();
+
+ // The runqueue lock needs to be held until after the task switch has ocurred.
+ // The lock will either be released in schedule() if no task switch is required
+ // or in task_switch_hook(). | Why does the lock have to be held until the task switch? |
svsm | github_2023 | others | 69 | coconut-svsm | 00xc | @@ -21,6 +23,23 @@ pub enum SvsmResultCode {
PROTOCOL_BASE(u64),
}
+impl fmt::Display for SvsmResultCode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::SUCCESS => write!(f, "SUCCESS"),
+ Self::INCOMPLETE => write!(f, "INCOMPLETE"),
+ Self::UNSUPPORTED_PROTOCOL => write!(f, "UNSUPPORTED_PROTOCOL"),
+ Self::UNSUPPORTED_CALL => write!(f, "UNSUPPORTED_CALL"),
+ Self::INVALID_ADDRESS => write!(f, "INVALID_ADDRESS"),
+ Self::INVALID_FORMAT => write!(f, "INVALID_FORMAT"),
+ Self::INVALID_PARAMETER => write!(f, "INVALID_PARAMETER"),
+ Self::INVALID_REQUEST => write!(f, "INVALID_REQUEST"),
+ Self::BUSY => write!(f, "BUSY"),
+ Self::PROTOCOL_BASE(n) => write!(f, "PROTOCOL_BASE({})", n),
+ }
+ }
+}
+ | Can't we just use `Debug` for this? |
svsm | github_2023 | others | 69 | coconut-svsm | 00xc | @@ -44,6 +63,17 @@ pub enum SvsmReqError {
FatalError(SvsmError),
}
+impl fmt::Display for SvsmReqError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::RequestError(e) => write!(f, "RequestError({})", e),
+ // Display is not implemented for all SvsmError. For now we print
+ // a static string without following up with its sub-types.
+ Self::FatalError(_) => write!(f, "FatalError(SvsmError)"),
+ }
+ }
+}
+ | Same here, `Debug` is implemented for the type, so why not use it when printing the error instead? |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -8,6 +8,9 @@ else
TARGET_PATH="debug"
endif
+ifdef V | `make V=0` also enable the verbose mode, so what about this:
```
ifeq ($(V), 1)
CARGO_ARGS += -v
else ifeq ($(V), 2)
CARGO_ARGS += -vv
endif
``` |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -87,7 +87,7 @@ pub const SIZE_LEVEL0: usize = 1usize << ((9 * 0) + 12);
#[cfg(feature = "enable-gdb")]
pub const STACK_PAGES: usize = 16; | I don't know the details, but should we increase this as well?
Pre existing: maybe it's better to define a base and increment it when the gdb stub is enabled |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: MIT */ | `cargo clippy` suggests several improvements for this file:
```
warning: you should consider adding a `Default` implementation for `SnpGuestRequestMsgHdr`
--> src/psp/guest_request_msg.rs:103:5
|
103 | / pub fn new() -> SnpGuestRequestMsgHdr {
104 | | SnpGuestRequestMsgHdr {
105 | | authtag: [0; 32],
106 | | msg_seqno: 0,
... |
117 | | }
118 | | }
| |_____^
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#new_without_default
= note: `#[warn(clippy::new_without_default)]` on by default
help: try adding this
|
92 + impl Default for SnpGuestRequestMsgHdr {
93 + fn default() -> Self {
94 + Self::new()
95 + }
96 + }
|
warning: casting raw pointers to the same type and constness is unnecessary (`*const u8` -> `*const u8`)
--> src/psp/guest_request_msg.rs:158:24
|
158 | let msg_algo = &self.algo as *const _ as *const u8;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `&self.algo as *const _`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
= note: `#[warn(clippy::unnecessary_cast)]` on by default
warning: casting raw pointers to the same type and constness is unnecessary (`*mut u8` -> `*mut u8`)
--> src/psp/guest_request_msg.rs:223:17
|
223 | self.buffer.as_mut_ptr() as *mut u8,
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `self.buffer.as_mut_ptr()`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
warning: use of `offset` with a `usize` casted to an `isize`
--> src/psp/guest_request_msg.rs:233:17
|
233 | self.buffer.as_mut_ptr::<u8>().offset(MSG_HDR_SIZE as isize),
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE)`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_offset_with_cast
= note: `#[warn(clippy::ptr_offset_with_cast)]` on by default
warning: unsafe function's docs miss `# Safety` section
--> src/psp/guest_request_msg.rs:239:5
|
239 | pub unsafe fn payload_offset(&mut self) -> *mut u8 {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#missing_safety_doc
= note: `#[warn(clippy::missing_safety_doc)]` on by default
warning: use of `offset` with a `usize` casted to an `isize`
--> src/psp/guest_request_msg.rs:240:9
|
240 | self.buffer.as_mut_ptr::<u8>().offset(MSG_HDR_SIZE as isize)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE)`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_offset_with_cast
warning: casting raw pointers to the same type and constness is unnecessary (`*const u8` -> `*const u8`)
--> src/psp/guest_request_msg.rs:265:37
|
265 | unsafe { from_raw_parts(plaintext.as_ptr() as *const u8, plaintext_len as usize) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `plaintext.as_ptr()`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
warning: this expression creates a reference which is immediately dereferenced by the compiler
--> src/psp/guest_request_msg.rs:272:34
|
272 | let gcm = Aes256Gcm::new(&key);
| ^^^^ help: change this to: `key`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow
= note: `#[warn(clippy::needless_borrow)]` on by default
warning: this expression creates a reference which is immediately dereferenced by the compiler
--> src/psp/guest_request_msg.rs:276:38
|
276 | let Ok(buffer) = gcm.encrypt(&nonce, payload) else {
| ^^^^^^ help: change this to: `nonce`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow
warning: this expression creates a reference which is immediately dereferenced by the compiler
--> src/psp/guest_request_msg.rs:329:34
|
329 | let gcm = Aes256Gcm::new(&key);
| ^^^^ help: change this to: `key`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow
warning: this expression creates a reference which is immediately dereferenced by the compiler
--> src/psp/guest_request_msg.rs:333:38
|
333 | let Ok(buffer) = gcm.decrypt(&nonce, payload) else {
| ^^^^^^ help: change this to: `nonce`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow
warning: manual `!RangeInclusive::contains` implementation
--> src/psp/guest_request_msg.rs:382:12
|
382 | if len < PAGE_SIZE || len > SNP_GUEST_REQ_MAX_DATA_SIZE {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use: `!(PAGE_SIZE..=SNP_GUEST_REQ_MAX_DATA_SIZE).contains(&len)`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#manual_range_contains
= note: `#[warn(clippy::manual_range_contains)]` on by default
warning: casting to the same type is unnecessary (`usize` -> `usize`)
--> src/psp/guest_request_msg.rs:412:31
|
412 | min(buf_size, SNP_GUEST_REQ_MAX_DATA_SIZE as usize),
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `SNP_GUEST_REQ_MAX_DATA_SIZE`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
warning: casting raw pointers to the same type and constness is unnecessary (`*const [u8; 16384]` -> `*const [u8; 16384]`)
--> src/psp/guest_request_msg.rs:419:19
|
419 | let buf = self.buffer.as_ptr() as *const [u8; SNP_GUEST_REQ_MAX_DATA_SIZE];
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `self.buffer.as_ptr()`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
warning: this `.into_iter()` call is equivalent to `.iter()` and will not consume the `slice`
--> src/psp/guest_request_msg.rs:422:32
|
422 | unsafe { (*buf)[..end].into_iter().all(|e| *e == 0) }
| ^^^^^^^^^ help: call directly: `iter`
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#into_iter_on_ref
= note: `#[warn(clippy::into_iter_on_ref)]` on by default
``` |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -15,7 +17,7 @@ pub struct SecretsPage {
pub fms: u32,
reserved_00c: u32,
pub gosvw: [u8; 16],
- pub vmpck: [[u8; 32]; VMPL_MAX],
+ pub vmpck: [[u8; VMPCK_SIZE]; VMPL_MAX], | Perhaps we could make this change in a separate commit. |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2023 IBM
+ *
+ * Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+ */
+
+extern crate alloc;
+
+use crate::address::VirtAddr;
+use crate::cpu::percpu::this_cpu_mut;
+use crate::error::SvsmError;
+use crate::protocols::errors::SvsmReqError;
+use crate::psp::guest_request_msg::{SnpGuestRequestExtData, SnpGuestRequestMsg};
+use crate::sev::ghcb::GhcbError;
+use crate::sev::secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE};
+use crate::{getter_func, BIT};
+
+use alloc::vec::Vec;
+
+///
+/// Hypervisor error codes
+///
+
+/// BIT!(32)
+pub const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+/// BIT!(33)
+pub const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+#[derive(Debug)]
+pub struct SnpGuestRequestCmd {
+ // SNP_GUEST_REQUEST requires two pages: one for
+ // the request and another for the response message. Both
+ // of them have to be assigned to the hypervisor (shared).
+ request: SnpGuestRequestMsg,
+ response: SnpGuestRequestMsg,
+
+ // SNP Extended Guest Request. Its pages are also shared
+ // with the hypervisor
+ ext_data: SnpGuestRequestExtData,
+
+ msg_seqno: u64,
+
+ is_initialized: bool,
+}
+
+impl SnpGuestRequestCmd {
+ getter_func!(ext_data, SnpGuestRequestExtData);
+
+ pub const fn new() -> Self {
+ Self {
+ request: SnpGuestRequestMsg::new(),
+ response: SnpGuestRequestMsg::new(),
+ ext_data: SnpGuestRequestExtData::new(),
+ msg_seqno: 0,
+ is_initialized: false,
+ }
+ }
+
+ pub fn init(&mut self) -> Result<(), SvsmReqError> {
+ if !self.is_initialized {
+ self.request.alloc()?;
+ self.response.alloc()?;
+ self.ext_data.alloc()?;
+
+ // The SNP ABI spec says the request, response and data pages have
+ // to be shared with the hypervisor
+ self.request.set_shared()?;
+ self.response.set_shared()?;
+ self.ext_data.set_shared()?;
+
+ self.is_initialized = true;
+ }
+
+ Ok(())
+ }
+
+ fn seqno_last_used(&self) -> u64 {
+ self.msg_seqno
+ }
+
+ fn seqno_add_two(&mut self) {
+ self.msg_seqno += 2;
+ }
+
+ /// Call the GHCB layer to send the encrypted SNP_GUEST_REQUEST message
+ /// to the AMD Secure Processor (PSP).
+ fn send(&mut self, extended: bool) -> Result<(), SvsmReqError> {
+ self.response.clear();
+
+ if extended {
+ this_cpu_mut().ghcb().guest_ext_request(
+ self.request.as_va(),
+ self.response.as_va(),
+ self.ext_data.as_va(),
+ self.ext_data.npages(),
+ )?;
+ } else {
+ this_cpu_mut()
+ .ghcb()
+ .guest_request(self.request.as_va(), self.response.as_va())?;
+ }
+
+ // The PSP firmware increases the sequence number only when
+ // it receives a request successfully. Hence, we sync our
+ // sequence number (add two) only when we receive a response
+ // successfully.
+ self.seqno_add_two();
+
+ Ok(())
+ }
+
+ /// Send a SNP_GUEST_REQUEST message to the AMD Secure processor (PSP) following
+ /// the GHCB protocol. Messages are a encrypted/decrypted using AES_GCM. Each
+ /// message has sequence number, which is monotonic.
+ ///
+ /// @msg_type = SNP_GUEST_REQUEST type stored in the payload
+ /// @extended = whether or not it is an extended SNP Guest Request
+ /// @payload = VirtAddr of the request, which will be encrypted
+ /// @payload_size = size of the payload
+ pub fn send_request(
+ &mut self,
+ msg_type: u8,
+ extended: bool,
+ payload: VirtAddr,
+ payload_size: u16,
+ ) -> Result<Vec<u8>, SvsmReqError> {
+ if !self.is_initialized {
+ return Err(SvsmReqError::invalid_request());
+ }
+ if is_vmpck0_clear() {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ let Some(msg_seqno) = self.seqno_last_used().checked_add(1) else {
+ log::error!("Encryption: sequence number overflow"); | To understand better, so when we exhaust all the sequence numbers, we will always be in error? |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2023 IBM
+ *
+ * Authors:
+ * Claudio Carvalho <cclaudio@linux.ibm.com>
+ */
+
+extern crate alloc;
+
+use crate::getter_func;
+use crate::protocols::errors::SvsmReqError;
+
+use alloc::vec::Vec;
+
+/// 64 | Maybe a leftover? If not, we could explain why 64 |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: MIT */ | Some tips from `cargo clippy`:
```
warning: unused import: `crate::types::PAGE_SIZE`
--> src/psp/request.rs:17:5
|
17 | use crate::types::PAGE_SIZE;
| ^^^^^^^^^^^^^^^^^^^^^^^
|
= note: `#[warn(unused_imports)]` on by default
warning: unused imports: `allocate_pages`, `free_page`, `get_order`
--> src/psp/request.rs:22:24
|
22 | use crate::mm::alloc::{allocate_pages, free_page, get_order};
| ^^^^^^^^^^^^^^ ^^^^^^^^^ ^^^^^^^^^
``` |
svsm | github_2023 | others | 69 | coconut-svsm | 00xc | @@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2023 IBM
+ *
+ * Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+ */
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::mm::alloc::{allocate_page, allocate_pages, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+use crate::{funcs, getter_func};
+
+use aes_gcm::{
+ aead::{Aead, Payload},
+ Aes256Gcm, Key, KeyInit, Nonce,
+};
+use alloc::vec::Vec;
+use core::cmp::min;
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+///
+/// AES_GCM
+///
+
+/// 1
+const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+/// In the SEV-SNP ABI spec, the authentication tag should be at most 128 bits.
+/// 16
+const AUTHTAG_SIZE: usize = 16;
+/// In the SEV-SNP ABI spec, the IV should be at most 96 bits; but the bits
+/// not used must be zeroed.
+/// 12
+const IV_SIZE: usize = 12;
+
+///
+/// SnpGuestRequestMsg types
+///
+
+/// 0
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// 5
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// 6
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+///
+/// SnpGuestRequestMsg version
+///
+
+/// 1
+const HDR_VERSION: u8 = 1;
+/// 1
+const MSG_VERSION: u8 = 1;
+
+///
+/// SnpGuestRequestMsg size
+///
+
+/// PAGE_SIZE - MSG_PAYLOAD_SIZE
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// 4000
+const MSG_PAYLOAD_SIZE: usize = 4000;
+/// 0x4000 (4 pages)
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 0x4000;
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsgHdr {
+ authtag: [u8; 32usize],
+ msg_seqno: u64,
+ rsvd1: [u8; 8usize],
+ algo: u8,
+ hdr_version: u8,
+ hdr_sz: u16,
+ msg_type: u8,
+ msg_version: u8,
+ msg_sz: u16,
+ rsvd2: u32,
+ msg_vmpck: u8,
+ rsvd3: [u8; 35usize],
+}
+
+#[allow(dead_code)]
+impl SnpGuestRequestMsgHdr {
+ getter_func!(authtag, [u8; 32]);
+ funcs!(msg_seqno, u64);
+ funcs!(algo, u8);
+ funcs!(hdr_version, u8);
+ funcs!(hdr_sz, u16);
+ funcs!(msg_type, u8);
+ funcs!(msg_version, u8);
+ funcs!(msg_sz, u16);
+ funcs!(msg_vmpck, u8);
+
+ pub fn new() -> SnpGuestRequestMsgHdr {
+ SnpGuestRequestMsgHdr {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ fn set_authtag_from_slice(&mut self, tag: &[u8]) -> Result<(), SvsmReqError> {
+ if tag.len() < AUTHTAG_SIZE {
+ return Err(SvsmReqError::invalid_format());
+ }
+ self.authtag[..AUTHTAG_SIZE].copy_from_slice(&tag[..AUTHTAG_SIZE]);
+ Ok(())
+ }
+
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let e: SvsmReqError = SvsmReqError::invalid_request();
+
+ let Ok(header_size) = u16::try_from(MSG_HDR_SIZE) else {
+ return Err(e);
+ };
+ if self.hdr_version != HDR_VERSION {
+ return Err(e);
+ }
+ if self.hdr_sz != header_size {
+ return Err(e);
+ }
+ if self.algo != SNP_AEAD_AES_256_GCM {
+ return Err(e);
+ }
+ if self.msg_type != msg_type {
+ return Err(e);
+ }
+ if self.msg_vmpck != 0 {
+ return Err(e);
+ }
+ if self.msg_seqno != *msg_seqno {
+ return Err(e);
+ }
+
+ Ok(())
+ }
+
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const _ as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsg {
+ /// PAGE_SIZE that carries the actual structure:
+ ///
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// hdr: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// }
+ ///
+ pub buffer: VirtAddr,
+}
+
+impl SnpGuestRequestMsg {
+ pub const fn new() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ pub fn alloc(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ Ok(())
+ }
+
+ pub fn set_shared(&self) -> Result<(), SvsmReqError> {
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ Ok(())
+ }
+
+ pub fn clear(&mut self) {
+ unsafe {
+ write_bytes(self.buffer.as_mut_ptr::<u8>(), 0u8, PAGE_SIZE);
+ }
+ }
+
+ pub fn as_va(&self) -> VirtAddr {
+ self.buffer
+ }
+
+ pub fn get_hdr_copy(&self) -> SnpGuestRequestMsgHdr {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new();
+ unsafe {
+ copy_nonoverlapping(
+ self.buffer.as_ptr::<u8>(),
+ ptr::addr_of_mut!(msg_hdr) as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+
+ msg_hdr
+ }
+
+ fn set_hdr(&mut self, msg_hdr: &SnpGuestRequestMsgHdr) {
+ unsafe {
+ copy_nonoverlapping(
+ msg_hdr as *const _ as *const u8,
+ self.buffer.as_mut_ptr() as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+ }
+
+ fn set_payload(&mut self, payload: *const u8, len: usize) {
+ unsafe {
+ copy_nonoverlapping(
+ payload,
+ self.buffer.as_mut_ptr::<u8>().offset(MSG_HDR_SIZE as isize),
+ len,
+ );
+ }
+ }
+
+ pub unsafe fn payload_offset(&mut self) -> *mut u8 {
+ self.buffer.as_mut_ptr::<u8>().offset(MSG_HDR_SIZE as isize)
+ }
+
+ // Encrypt the given plaintext and save the obtained ciphertext in the message payload
+ pub fn encrypt_save(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ plaintext: VirtAddr,
+ plaintext_len: u16,
+ ) -> Result<(), SvsmReqError> {
+ // Construct message header
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new();
+ msg_hdr.set_hdr_sz(MSG_HDR_SIZE as u16);
+ msg_hdr.set_algo(SNP_AEAD_AES_256_GCM);
+ msg_hdr.set_hdr_version(HDR_VERSION);
+ msg_hdr.set_msg_sz(plaintext_len);
+ msg_hdr.set_msg_type(msg_type);
+ msg_hdr.set_msg_version(MSG_VERSION);
+ msg_hdr.set_msg_vmpck(0);
+ msg_hdr.set_msg_seqno(msg_seqno);
+
+ let aad_slice: &[u8] = msg_hdr.get_aad_slice();
+ let plaintext_slice: &[u8] =
+ unsafe { from_raw_parts(plaintext.as_ptr() as *const u8, plaintext_len as usize) };
+ let payload = Payload {
+ msg: plaintext_slice,
+ aad: aad_slice,
+ };
+
+ let key = Key::<Aes256Gcm>::from_slice(vmpck0);
+ let gcm = Aes256Gcm::new(&key);
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+ let nonce = Nonce::from_slice(&iv);
+
+ let Ok(buffer) = gcm.encrypt(&nonce, payload) else {
+ log::warn!("AES_GCM.encrypt() failed");
+ return Err(SvsmReqError::invalid_format());
+ };
+
+ // RustCrypto AES_GCM.encrypt() returns a postfix authenticated tag (i.e. ciphertext + tag)
+ // Copy the authtag to the message header.
+ let ciphertext_len = buffer.len() - AUTHTAG_SIZE;
+ let (ciphertext, tag) = buffer.split_at(plaintext_len as usize);
+ msg_hdr.set_authtag_from_slice(tag)?;
+
+ self.clear();
+ self.set_hdr(&msg_hdr);
+ self.set_payload(ciphertext.as_ptr(), ciphertext_len);
+
+ Ok(())
+ }
+
+ /// Decrypt the message payload and return the plaintext obtained
+ pub fn decrypt_get(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ ) -> Result<Vec<u8>, SvsmReqError> {
+ let msg_hdr = self.get_hdr_copy();
+
+ msg_hdr.validate(msg_type, &msg_seqno)?;
+
+ // Make sure the message payload have space also for the authenticated tag.
+ if msg_hdr.msg_sz() as usize + AUTHTAG_SIZE > MSG_PAYLOAD_SIZE {
+ return Err(SvsmReqError::incomplete());
+ }
+
+ let aad_slice: &[u8] = msg_hdr.get_aad_slice();
+ let payload_slice = unsafe {
+ from_raw_parts_mut(
+ self.payload_offset(),
+ msg_hdr.msg_sz() as usize + AUTHTAG_SIZE,
+ )
+ };
+
+ // Append the authenticated tag to the message payload.
+ // RustCrypto AES_GCM requires postfix authtag.
+ let start: usize = usize::from(msg_hdr.msg_sz());
+ payload_slice[start..].copy_from_slice(&msg_hdr.authtag()[..AUTHTAG_SIZE]);
+
+ let payload = Payload {
+ msg: payload_slice,
+ aad: aad_slice,
+ };
+
+ let key = Key::<Aes256Gcm>::from_slice(vmpck0);
+ let gcm = Aes256Gcm::new(&key);
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+ let nonce = Nonce::from_slice(&iv);
+
+ let Ok(buffer) = gcm.decrypt(&nonce, payload) else {
+ log::warn!("AES_GCM.decrypt() failed");
+ return Err(SvsmReqError::invalid_format());
+ };
+
+ Ok(buffer)
+ }
+}
+
+fn build_iv(msg_seqno: u64) -> [u8; IV_SIZE] {
+ const U64_SIZE: usize = core::mem::size_of::<u64>();
+ let mut iv: [u8; IV_SIZE] = [0u8; IV_SIZE];
+
+ iv[..U64_SIZE].copy_from_slice(&msg_seqno.to_ne_bytes());
+ iv
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestExtData {
+ buffer: VirtAddr,
+ len: usize,
+}
+
+impl SnpGuestRequestExtData {
+ pub const fn new() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ len: 0,
+ }
+ }
+
+ pub fn alloc(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_pages(get_order(SNP_GUEST_REQ_MAX_DATA_SIZE))?;
+ assert!(self.buffer.is_page_aligned());
+ self.len = SNP_GUEST_REQ_MAX_DATA_SIZE;
+ Ok(())
+ }
+
+ pub fn set_shared(&mut self) -> Result<(), SvsmReqError> {
+ let start = usize::from(self.buffer);
+ let end = start + self.len;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_shared_4k(vpage)?;
+ }
+ Ok(())
+ }
+
+ pub fn set_len(&mut self, len: usize) -> Result<(), SvsmReqError> {
+ if len < PAGE_SIZE || len > SNP_GUEST_REQ_MAX_DATA_SIZE {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+ Ok(())
+ } | I think setting the `len` field is missing in this method. |
svsm | github_2023 | others | 69 | coconut-svsm | stefano-garzarella | @@ -8,6 +8,11 @@ else
TARGET_PATH="debug" | nit: if you need to resend, we can update the commit description |
svsm | github_2023 | others | 69 | coconut-svsm | 00xc | @@ -85,11 +85,11 @@ pub const SIZE_LEVEL0: usize = 1usize << ((9 * 0) + 12);
// Stack definitions
// The GDB stub requires a larger stack.
#[cfg(feature = "enable-gdb")]
-pub const STACK_PAGES_GDB: usize = 12;
+pub const STACK_PAGES_GDB: usize = 4;
#[cfg(not(feature = "enable-gdb"))]
pub const STACK_PAGES_GDB: usize = 0;
-pub const STACK_PAGES: usize = 4 + STACK_PAGES_GDB;
+pub const STACK_PAGES: usize = 12 + STACK_PAGES_GDB; | Have you been able to pinpoint where in the code the stack usage caused the double fault? |
svsm | github_2023 | others | 69 | coconut-svsm | roy-hopkins | @@ -4,5 +4,5 @@ build-std-features = ["compiler-builtins-mem"]
[build]
target = "svsm-target.json"
-rustflags = ["-C", "force-frame-pointers"]
+rustflags = ["-C", "force-frame-pointers", "--cfg", "aes_force_soft", "--cfg", "polyval_force_soft"] | I assume this is using the software implementation because AES-NI is unavailable due to SSE not being supported/initialised in the svsm? Is this something we can/should address? |
svsm | github_2023 | others | 69 | coconut-svsm | roy-hopkins | @@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: MIT OR Apache-2.0 */
+/*
+ * Copyright (C) 2023 IBM
+ *
+ * Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+ */
+
+///
+/// This file follows the AMD SEV-SNP spec v1.54
+///
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::mm::alloc::{allocate_page, allocate_pages, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+use crate::{funcs, getter_func};
+
+use aes_gcm::{
+ aead::{Aead, Payload},
+ Aes256Gcm, Key, KeyInit, Nonce,
+};
+use alloc::vec::Vec;
+use core::cmp::min;
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+/// Table 99 - AEAD Algorithm Encodings
+///
+/// 1
+const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+/// Table 98 - Message Header Format
+///
+/// If the authentication tag for the designated algorithm is
+/// shorter than 32 B, the first bytes of AUTHTAG are used and
+/// the remaining bytes must be zero.
+///
+/// 16
+const AUTHTAG_SIZE: usize = 16;
+///
+/// The PSP firmware constructs the incoming 96-bit IV. The firmware
+/// sets bits IV[63:0] to the MSG_SEQNO and bits IV[95:64] to 0h.
+///
+/// 12
+const IV_SIZE: usize = 12;
+///
+/// Set to 1 in the SEV-SNP ABI spec v1.54
+///
+/// 1
+const HDR_VERSION: u8 = 1;
+///
+/// Set to 1 in the SEV-SNP ABI spec v1.54
+///
+/// 1
+const MSG_VERSION: u8 = 1;
+
+/// Table 100 - Message Type Encodings
+///
+/// 0
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// 5
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// 6
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+/// PAGE_SIZE - MSG_PAYLOAD_SIZE
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// 4000
+const MSG_PAYLOAD_SIZE: usize = 4000;
+/// 0x4000 (4 pages)
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 0x4000;
+
+/// Table 98 - Message Header Format
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsgHdr {
+ authtag: [u8; 32],
+ msg_seqno: u64,
+ rsvd1: [u8; 8],
+ algo: u8,
+ hdr_version: u8,
+ hdr_sz: u16,
+ msg_type: u8,
+ msg_version: u8,
+ msg_sz: u16,
+ rsvd2: u32,
+ msg_vmpck: u8,
+ rsvd3: [u8; 35],
+}
+
+#[allow(dead_code)]
+impl SnpGuestRequestMsgHdr {
+ getter_func!(authtag, [u8; 32]);
+ funcs!(msg_seqno, u64);
+ funcs!(algo, u8);
+ funcs!(hdr_version, u8);
+ funcs!(hdr_sz, u16);
+ funcs!(msg_type, u8);
+ funcs!(msg_version, u8);
+ funcs!(msg_sz, u16);
+ funcs!(msg_vmpck, u8);
+
+ pub fn new() -> SnpGuestRequestMsgHdr {
+ SnpGuestRequestMsgHdr {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ fn set_authtag_from_slice(&mut self, tag: &[u8]) -> Result<(), SvsmReqError> {
+ if tag.len() < AUTHTAG_SIZE {
+ return Err(SvsmReqError::invalid_format());
+ }
+ self.authtag[..AUTHTAG_SIZE].copy_from_slice(&tag[..AUTHTAG_SIZE]);
+ Ok(())
+ }
+
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let e: SvsmReqError = SvsmReqError::invalid_request();
+
+ let Ok(header_size) = u16::try_from(MSG_HDR_SIZE) else {
+ return Err(e);
+ };
+ if self.hdr_version != HDR_VERSION {
+ return Err(e);
+ }
+ if self.hdr_sz != header_size {
+ return Err(e);
+ }
+ if self.algo != SNP_AEAD_AES_256_GCM {
+ return Err(e);
+ }
+ if self.msg_type != msg_type {
+ return Err(e);
+ }
+ if self.msg_vmpck != 0 {
+ return Err(e);
+ }
+ if self.msg_seqno != *msg_seqno {
+ return Err(e);
+ }
+
+ Ok(())
+ }
+
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsg {
+ /// PAGE_SIZE that carries the actual structure:
+ ///
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// hdr: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// }
+ ///
+ pub buffer: VirtAddr,
+}
+
+impl SnpGuestRequestMsg {
+ pub const fn new() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ pub fn alloc(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?; | Should this allocation (and the sharing of the page with the host) be released when the `SnpGuestRequestMsg` is dropped? |
svsm | github_2023 | others | 69 | coconut-svsm | roy-hopkins | @@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: MIT OR Apache-2.0 */
+/*
+ * Copyright (C) 2023 IBM
+ *
+ * Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+ */
+
+///
+/// This file follows the AMD SEV-SNP spec v1.54
+///
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::mm::alloc::{allocate_page, allocate_pages, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+use crate::{funcs, getter_func};
+
+use aes_gcm::{
+ aead::{Aead, Payload},
+ Aes256Gcm, Key, KeyInit, Nonce,
+};
+use alloc::vec::Vec;
+use core::cmp::min;
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+/// Table 99 - AEAD Algorithm Encodings
+///
+/// 1
+const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+/// Table 98 - Message Header Format
+///
+/// If the authentication tag for the designated algorithm is
+/// shorter than 32 B, the first bytes of AUTHTAG are used and
+/// the remaining bytes must be zero.
+///
+/// 16
+const AUTHTAG_SIZE: usize = 16;
+///
+/// The PSP firmware constructs the incoming 96-bit IV. The firmware
+/// sets bits IV[63:0] to the MSG_SEQNO and bits IV[95:64] to 0h.
+///
+/// 12
+const IV_SIZE: usize = 12;
+///
+/// Set to 1 in the SEV-SNP ABI spec v1.54
+///
+/// 1
+const HDR_VERSION: u8 = 1;
+///
+/// Set to 1 in the SEV-SNP ABI spec v1.54
+///
+/// 1
+const MSG_VERSION: u8 = 1;
+
+/// Table 100 - Message Type Encodings
+///
+/// 0
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// 5
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// 6
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+/// PAGE_SIZE - MSG_PAYLOAD_SIZE
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// 4000
+const MSG_PAYLOAD_SIZE: usize = 4000;
+/// 0x4000 (4 pages)
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 0x4000;
+
+/// Table 98 - Message Header Format
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsgHdr {
+ authtag: [u8; 32],
+ msg_seqno: u64,
+ rsvd1: [u8; 8],
+ algo: u8,
+ hdr_version: u8,
+ hdr_sz: u16,
+ msg_type: u8,
+ msg_version: u8,
+ msg_sz: u16,
+ rsvd2: u32,
+ msg_vmpck: u8,
+ rsvd3: [u8; 35],
+}
+
+#[allow(dead_code)]
+impl SnpGuestRequestMsgHdr {
+ getter_func!(authtag, [u8; 32]);
+ funcs!(msg_seqno, u64);
+ funcs!(algo, u8);
+ funcs!(hdr_version, u8);
+ funcs!(hdr_sz, u16);
+ funcs!(msg_type, u8);
+ funcs!(msg_version, u8);
+ funcs!(msg_sz, u16);
+ funcs!(msg_vmpck, u8);
+
+ pub fn new() -> SnpGuestRequestMsgHdr {
+ SnpGuestRequestMsgHdr {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ fn set_authtag_from_slice(&mut self, tag: &[u8]) -> Result<(), SvsmReqError> {
+ if tag.len() < AUTHTAG_SIZE {
+ return Err(SvsmReqError::invalid_format());
+ }
+ self.authtag[..AUTHTAG_SIZE].copy_from_slice(&tag[..AUTHTAG_SIZE]);
+ Ok(())
+ }
+
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let e: SvsmReqError = SvsmReqError::invalid_request();
+
+ let Ok(header_size) = u16::try_from(MSG_HDR_SIZE) else {
+ return Err(e);
+ };
+ if self.hdr_version != HDR_VERSION {
+ return Err(e);
+ }
+ if self.hdr_sz != header_size {
+ return Err(e);
+ }
+ if self.algo != SNP_AEAD_AES_256_GCM {
+ return Err(e);
+ }
+ if self.msg_type != msg_type {
+ return Err(e);
+ }
+ if self.msg_vmpck != 0 {
+ return Err(e);
+ }
+ if self.msg_seqno != *msg_seqno {
+ return Err(e);
+ }
+
+ Ok(())
+ }
+
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsg {
+ /// PAGE_SIZE that carries the actual structure:
+ ///
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// hdr: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// }
+ ///
+ pub buffer: VirtAddr,
+}
+
+impl SnpGuestRequestMsg {
+ pub const fn new() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ pub fn alloc(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ Ok(())
+ }
+
+ pub fn set_shared(&self) -> Result<(), SvsmReqError> {
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?; | As far as I can tell, clearing the C bit will allow the host to read the previous contents of the page, but the page will only contain ciphertext. Should the page be cleared at this point as a precaution though? |
svsm | github_2023 | others | 69 | coconut-svsm | deeglaze | @@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: MIT OR Apache-2.0 */
+/*
+ * Copyright (C) 2023 IBM
+ *
+ * Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+ */
+
+///
+/// This file follows the AMD SEV-SNP spec v1.54
+///
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::mm::alloc::{allocate_page, allocate_pages, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+use crate::{funcs, getter_func};
+
+use aes_gcm::{
+ aead::{Aead, Payload},
+ Aes256Gcm, Key, KeyInit, Nonce,
+};
+use alloc::vec::Vec;
+use core::cmp::min;
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+/// Table 99 - AEAD Algorithm Encodings
+///
+/// 1
+const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+/// Table 98 - Message Header Format
+///
+/// If the authentication tag for the designated algorithm is
+/// shorter than 32 B, the first bytes of AUTHTAG are used and
+/// the remaining bytes must be zero.
+///
+/// 16
+const AUTHTAG_SIZE: usize = 16;
+///
+/// The PSP firmware constructs the incoming 96-bit IV. The firmware
+/// sets bits IV[63:0] to the MSG_SEQNO and bits IV[95:64] to 0h.
+///
+/// 12
+const IV_SIZE: usize = 12;
+///
+/// Set to 1 in the SEV-SNP ABI spec v1.54
+///
+/// 1
+const HDR_VERSION: u8 = 1;
+///
+/// Set to 1 in the SEV-SNP ABI spec v1.54
+///
+/// 1
+const MSG_VERSION: u8 = 1;
+
+/// Table 100 - Message Type Encodings
+///
+/// 0
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// 5
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// 6
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+/// PAGE_SIZE - MSG_PAYLOAD_SIZE
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// 4000
+const MSG_PAYLOAD_SIZE: usize = 4000;
+/// 0x4000 (4 pages)
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 0x4000;
+
+/// Table 98 - Message Header Format
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsgHdr {
+ authtag: [u8; 32],
+ msg_seqno: u64,
+ rsvd1: [u8; 8],
+ algo: u8,
+ hdr_version: u8,
+ hdr_sz: u16,
+ msg_type: u8,
+ msg_version: u8,
+ msg_sz: u16,
+ rsvd2: u32,
+ msg_vmpck: u8,
+ rsvd3: [u8; 35],
+}
+
+#[allow(dead_code)]
+impl SnpGuestRequestMsgHdr {
+ getter_func!(authtag, [u8; 32]);
+ funcs!(msg_seqno, u64);
+ funcs!(algo, u8);
+ funcs!(hdr_version, u8);
+ funcs!(hdr_sz, u16);
+ funcs!(msg_type, u8);
+ funcs!(msg_version, u8);
+ funcs!(msg_sz, u16);
+ funcs!(msg_vmpck, u8);
+
+ pub fn new() -> SnpGuestRequestMsgHdr {
+ SnpGuestRequestMsgHdr {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ fn set_authtag_from_slice(&mut self, tag: &[u8]) -> Result<(), SvsmReqError> {
+ if tag.len() < AUTHTAG_SIZE {
+ return Err(SvsmReqError::invalid_format());
+ }
+ self.authtag[..AUTHTAG_SIZE].copy_from_slice(&tag[..AUTHTAG_SIZE]);
+ Ok(())
+ }
+
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let e: SvsmReqError = SvsmReqError::invalid_request();
+
+ let Ok(header_size) = u16::try_from(MSG_HDR_SIZE) else {
+ return Err(e);
+ };
+ if self.hdr_version != HDR_VERSION {
+ return Err(e);
+ }
+ if self.hdr_sz != header_size {
+ return Err(e);
+ }
+ if self.algo != SNP_AEAD_AES_256_GCM {
+ return Err(e);
+ }
+ if self.msg_type != msg_type {
+ return Err(e);
+ }
+ if self.msg_vmpck != 0 {
+ return Err(e);
+ }
+ if self.msg_seqno != *msg_seqno {
+ return Err(e);
+ }
+
+ Ok(())
+ }
+
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct SnpGuestRequestMsg {
+ /// PAGE_SIZE that carries the actual structure:
+ ///
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// hdr: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// }
+ ///
+ pub buffer: VirtAddr,
+}
+
+impl SnpGuestRequestMsg {
+ pub const fn new() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ pub fn alloc(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ Ok(())
+ }
+
+ pub fn set_shared(&self) -> Result<(), SvsmReqError> {
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ Ok(())
+ }
+
+ pub fn clear(&mut self) {
+ unsafe {
+ write_bytes(self.buffer.as_mut_ptr::<u8>(), 0u8, PAGE_SIZE);
+ }
+ }
+
+ pub fn as_va(&self) -> VirtAddr {
+ self.buffer
+ }
+
+ pub fn get_hdr_copy(&self) -> SnpGuestRequestMsgHdr {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new();
+ unsafe {
+ copy_nonoverlapping(
+ self.buffer.as_ptr::<u8>(),
+ ptr::addr_of_mut!(msg_hdr) as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+
+ msg_hdr
+ }
+
+ fn set_hdr(&mut self, msg_hdr: &SnpGuestRequestMsgHdr) {
+ unsafe {
+ copy_nonoverlapping(
+ msg_hdr as *const _ as *const u8,
+ self.buffer.as_mut_ptr::<u8>(),
+ MSG_HDR_SIZE,
+ );
+ }
+ }
+
+ fn set_payload(&mut self, payload: *const u8, len: usize) {
+ unsafe {
+ copy_nonoverlapping(
+ payload,
+ self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE),
+ len,
+ );
+ }
+ }
+
+ pub fn payload_offset(&mut self) -> *mut u8 {
+ unsafe { self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE) }
+ }
+
+ // Encrypt the given plaintext and save the obtained ciphertext in the message payload
+ pub fn encrypt_save( | Can these cryptographic implementations be stubbed into an interface that can be instantiated from the outset? If we (Google) are to use this package, we'll be required to link with BoringSSL and use its aes implementation. I haven't done much Rust programming yet, so I'm not sure the right idiom to do this. |
svsm | github_2023 | others | 69 | coconut-svsm | daaltobe | @@ -0,0 +1,196 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+/// This files provides an API to request SNP_GUEST_REQUEST services
+/// to the AMD Secure Processor (a.k.a. PSP).
+///
+/// The PSP accepts only one SNP_GUEST_REQUEST at a time, we ensure that by
+/// having a global GREQ_DRIVER under a spinlock.
+///
+/// This API can be reached from outside the SVSM (e.g. OVMF, guest kernel)
+/// through the SVSM runtime services (protocols).
+///
+/// Further information can be found in the Secure Nested Paging Firmware ABI
+/// Specification, Chapter 7.
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::greq::driver::SnpGuestRequestDriver;
+use crate::greq::msg::SNP_MSG_REPORT_REQ;
+use crate::greq::msg_report::{SnpReportRequest, SnpReportResponse};
+use crate::locking::{LockGuard, SpinLock};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::disable_vmpck0;
+
+use log;
+
+// SNP_GUEST_REQUEST driver
+static GREQ_DRIVER: SpinLock<SnpGuestRequestDriver> =
+ SpinLock::new(SnpGuestRequestDriver::default());
+
+pub fn greq_driver_init() {
+ if let Err(e) = GREQ_DRIVER.lock().init() {
+ log::error!("SNP_GUEST_REQUEST driver failed to initialize, e={:?}", e);
+ disable_vmpck0();
+ }
+}
+
+fn get_report_common(
+ driver: &mut LockGuard<SnpGuestRequestDriver>,
+ buffer: VirtAddr,
+ buffer_size: usize,
+ extended: bool,
+) -> Result<usize, SvsmReqError> {
+ const REPORT_REQUEST_SIZE: usize = core::mem::size_of::<SnpReportRequest>();
+ const REPORT_RESPONSE_SIZE: usize = core::mem::size_of::<SnpReportResponse>();
+
+ if buffer.is_null() || REPORT_REQUEST_SIZE > buffer_size || REPORT_RESPONSE_SIZE > buffer_size { | Could do static assert on REPORT_RESPONSE_SIZE > REPORT_REQUEST_SIZE and only check REPORT_RESPONSE_SIZE |
svsm | github_2023 | others | 69 | coconut-svsm | daaltobe | @@ -0,0 +1,196 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+/// This files provides an API to request SNP_GUEST_REQUEST services
+/// to the AMD Secure Processor (a.k.a. PSP).
+///
+/// The PSP accepts only one SNP_GUEST_REQUEST at a time, we ensure that by
+/// having a global GREQ_DRIVER under a spinlock.
+///
+/// This API can be reached from outside the SVSM (e.g. OVMF, guest kernel)
+/// through the SVSM runtime services (protocols).
+///
+/// Further information can be found in the Secure Nested Paging Firmware ABI
+/// Specification, Chapter 7.
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::greq::driver::SnpGuestRequestDriver;
+use crate::greq::msg::SNP_MSG_REPORT_REQ;
+use crate::greq::msg_report::{SnpReportRequest, SnpReportResponse};
+use crate::locking::{LockGuard, SpinLock};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::disable_vmpck0;
+
+use log;
+
+// SNP_GUEST_REQUEST driver
+static GREQ_DRIVER: SpinLock<SnpGuestRequestDriver> =
+ SpinLock::new(SnpGuestRequestDriver::default());
+
+pub fn greq_driver_init() {
+ if let Err(e) = GREQ_DRIVER.lock().init() {
+ log::error!("SNP_GUEST_REQUEST driver failed to initialize, e={:?}", e);
+ disable_vmpck0();
+ }
+}
+
+fn get_report_common(
+ driver: &mut LockGuard<SnpGuestRequestDriver>,
+ buffer: VirtAddr,
+ buffer_size: usize,
+ extended: bool,
+) -> Result<usize, SvsmReqError> {
+ const REPORT_REQUEST_SIZE: usize = core::mem::size_of::<SnpReportRequest>();
+ const REPORT_RESPONSE_SIZE: usize = core::mem::size_of::<SnpReportResponse>();
+
+ if buffer.is_null() || REPORT_REQUEST_SIZE > buffer_size || REPORT_RESPONSE_SIZE > buffer_size {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ let request = buffer.as_ptr::<SnpReportRequest>();
+
+ unsafe {
+ if (*request).vmpl() != 0 { | I might hide the details of SnpReportRequest inside this code and have callers pass in input_data only, which also avoids checking VMPL. |
svsm | github_2023 | others | 69 | coconut-svsm | joergroedel | @@ -0,0 +1,305 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+/// This driver implements the protocol defined in the AMD SEV-SNP spec v1.54 (chapter 7)
+/// to send SNP_GUEST_REQUEST (greq) messages to the AMD Secure Processor (a.k.a. PSP)
+///
+/// The PSP provides multiple services through SNP_GUEST_REQUEST messages
+/// including attestation report, key derivation and VM import/export.
+///
+/// A greq message is composed by a generic header and the payload. The header contains
+/// fields that describes the payload. The payload can be the request or response structure
+/// for any of the greq services.
+///
+/// Every greq message is encrypted using AES_GCM 256 bits as the payload contains
+/// sensitive data and it needs to go through the hypervisor to reach out to the PSP.
+///
+/// Every greq message has a monotonic sequence number that prevents it from replay attacks.
+/// This sequence number is synchronized with the PSP.
+extern crate alloc;
+
+use crate::address::VirtAddr;
+use crate::cpu::percpu::this_cpu_mut;
+use crate::error::SvsmError;
+use crate::greq::msg::{SnpGuestRequestExtData, SnpGuestRequestMsg};
+use crate::protocols::errors::{SvsmReqError, SvsmResultCode};
+use crate::sev::ghcb::GhcbError;
+use crate::sev::secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE};
+use crate::{getter_func, BIT};
+
+/// Hypervisor error codes
+pub const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+pub const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+#[derive(Debug)]
+pub struct SnpGuestRequestDriver {
+ // SNP_GUEST_REQUEST requires two pages: one for
+ // the request and another for the response message. Both
+ // of them have to be assigned to the hypervisor (shared).
+ request: SnpGuestRequestMsg,
+ response: SnpGuestRequestMsg,
+
+ // SNP Extended Guest Request data pages also need to be shared
+ // with the hypervisor
+ ext_data: SnpGuestRequestExtData,
+
+ msg_seqno: u64,
+
+ is_initialized: bool,
+}
+
+impl SnpGuestRequestDriver {
+ getter_func!(ext_data, SnpGuestRequestExtData);
+
+ /// The global driver instance GREQ_DRIVER requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// https://github.com/rust-lang/rust/issues/67792
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ request: SnpGuestRequestMsg::default(),
+ response: SnpGuestRequestMsg::default(),
+ ext_data: SnpGuestRequestExtData::default(),
+ msg_seqno: 0,
+ is_initialized: false,
+ }
+ }
+
+ pub fn init(&mut self) -> Result<(), SvsmReqError> {
+ if !self.is_initialized {
+ self.request.init()?;
+ self.response.init()?;
+ self.ext_data.init()?; | If I checked correctly, none of the underlying types implement the drop trait. The memory allocated in these functions will be leaked if initialization fails. Can you please implement the drop trait for `SnpGuestRequestMsg` and `SnpGuestRequestExtData` so that the pages are freed and set to private again? |
svsm | github_2023 | others | 69 | coconut-svsm | deeglaze | @@ -480,6 +482,61 @@ impl GHCB {
Ok(())
}
+ pub fn guest_request(
+ &mut self,
+ req_page: VirtAddr,
+ resp_page: VirtAddr,
+ ) -> Result<(), SvsmError> {
+ self.clear();
+
+ let info1: u64 = u64::from(virt_to_phys(req_page));
+ let info2: u64 = u64::from(virt_to_phys(resp_page));
+
+ self.vmgexit(GHCBExitCode::GUEST_REQUEST, info1, info2)?;
+
+ if !self.is_valid(OFF_SW_EXIT_INFO_2) {
+ return Err(GhcbError::VmgexitInvalid.into());
+ }
+
+ if self.sw_exit_info_2 != 0 {
+ return Err(GhcbError::VmgexitError(self.sw_exit_info_1, self.sw_exit_info_2).into());
+ }
+
+ Ok(())
+ }
+
+ pub fn guest_ext_request(
+ &mut self,
+ req_page: VirtAddr,
+ resp_page: VirtAddr,
+ data_pages: VirtAddr,
+ data_size: u64,
+ ) -> Result<(), SvsmError> {
+ self.clear();
+
+ let info1: u64 = u64::from(virt_to_phys(req_page));
+ let info2: u64 = u64::from(virt_to_phys(resp_page));
+ let rax: u64 = u64::from(virt_to_phys(data_pages));
+
+ self.set_rax(rax);
+ self.set_rbx(data_size);
+
+ self.vmgexit(GHCBExitCode::GUEST_EXT_REQUEST, info1, info2)?;
+
+ if !self.is_valid(OFF_SW_EXIT_INFO_2) {
+ return Err(GhcbError::VmgexitInvalid.into());
+ }
+
+ // On error, RBX will contain the number of guest contiguous pages
+ // required to hold the data to be returned
+ if self.sw_exit_info_2 != 0 { | ~Not every nonzero sw_exit_info_2 value is fatal, such as 2 << 32, from GHCB section 4.1.7, which indicates the guest should wait and retry.~
Retracted, I see you have this handled at the callsite. |
svsm | github_2023 | others | 69 | coconut-svsm | deeglaze | @@ -0,0 +1,539 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` message that carries the actual command in the payload
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE};
+use crate::mm::alloc::{allocate_page, allocate_pages, free_page, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+
+/// AEAD algorithm: Invalid
+pub const SNP_AEAD_INVALID: u8 = 0;
+/// AEAD algorithm: AES-256 GCM
+pub const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+// Message Type Encodings (AMD SEV-SNP spec. table 100)
+
+/// Message type: Invalid
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// Message type: Report request
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// Message type: Report response
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+// A guest request message size is one PAGE_SIZE: header + payload
+
+/// Message header size
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = 4000;
+
+/// Maximum buffer size the hypervisor considers to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestMsg {
+ /// Buffer used to store the actual `SNP_GUEST_REQUEST` message
+ /// header and payload, as described in the struct below.
+ /// The payload format differs for each messsage type.
+ /// ```text
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// header: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// };
+ /// ```
+ pub buffer: VirtAddr,
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new header and initialize it
+ pub fn new(msg_sz: u16, msg_type: u8, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SNP_AEAD_AES_256_GCM,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the header authenticated tag
+ fn set_authtag(&mut self, tag: VirtAddr) {
+ unsafe { copy_nonoverlapping(tag.as_ptr(), self.authtag.as_mut_ptr(), AUTHTAG_SIZE) };
+ }
+
+ /// Validate the header
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let header_size = u16::try_from(MSG_HDR_SIZE).unwrap();
+
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SNP_AEAD_AES_256_GCM
+ || self.msg_type != msg_type
+ || self.msg_vmpck != 0
+ || self.msg_seqno != *msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used for additional authenticated data
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ // Ensure the allocated buffer memory will not leak if this type is
+ // instantiated outside of the `SNP_GUEST_REQUEST` static driver context
+ self.free();
+ }
+}
+
+impl SnpGuestRequestMsg {
+ /// The global driver instance (GREQ_DRIVER) requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ /// Allocate the `SNP_GUEST_REQUEST` message (one [PAGE_SIZE]) and
+ /// share it with the hypervisor
+ pub fn init(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ Ok(())
+ }
+
+ /// Free the memory allocated for the `SNP_GUEST_REQUEST` buffer
+ pub fn free(&mut self) {
+ if !self.buffer.is_null() {
+ if this_cpu_mut()
+ .get_pgtable()
+ .set_encrypted_4k(self.buffer)
+ .is_err()
+ {
+ log::error!("Failed to set private the SNP_GUEST_REQUEST buffer");
+ }
+ free_page(self.buffer);
+ self.buffer = VirtAddr::null();
+ }
+ }
+
+ /// Clear the `SNP_GUEST_REQUEST` message
+ pub fn clear(&mut self) {
+ unsafe {
+ write_bytes(self.buffer.as_mut_ptr::<u8>(), 0u8, PAGE_SIZE);
+ }
+ }
+
+ /// Get the [VirtAddr] of the `SNP_GUEST_REQUEST` message
+ pub fn as_va(&self) -> VirtAddr {
+ self.buffer
+ }
+
+ /// Get a copy of the `SNP_GUEST_REQUEST` message header
+ pub fn get_hdr_copy(&self) -> SnpGuestRequestMsgHdr {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::default();
+ unsafe {
+ copy_nonoverlapping(
+ self.buffer.as_ptr::<u8>(),
+ ptr::addr_of_mut!(msg_hdr) as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+
+ msg_hdr
+ }
+
+ /// Set the `SNP_GUEST_REQUEST` message header
+ fn set_hdr(&mut self, msg_hdr: &SnpGuestRequestMsgHdr) {
+ unsafe {
+ copy_nonoverlapping(
+ msg_hdr as *const _ as *const u8,
+ self.buffer.as_mut_ptr::<u8>(),
+ MSG_HDR_SIZE,
+ );
+ }
+ }
+
+ /// Get the `SNP_GUEST_REQUEST` message payload address
+ pub fn payload_offset(&mut self) -> *mut u8 {
+ unsafe { self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE) }
+ }
+
+ /// Load a SNP_GUEST_REQUEST message to be sent to the PSP. The message
+ /// will be encrypted using AES-256 GCM.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Type of message to be created
+ /// * `msg_seqno`: Sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to encrypt the `payload`
+ /// * `payload`: [VirtAddr] of the message payload, which will be encrypted
+ /// * `payload_len`: Size of the `payload` in bytes
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * ()
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn load(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ payload: VirtAddr,
+ payload_size: u16,
+ ) -> Result<(), SvsmReqError> {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new(payload_size, msg_type, msg_seqno);
+
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+
+ let plaintext_size = usize::from(payload_size);
+ let plaintext = unsafe { from_raw_parts(payload.as_ptr::<u8>(), plaintext_size) };
+
+ let msg_payload_addr = self.buffer + MSG_HDR_SIZE;
+
+ // Encrypt the provided payload store and store it in the message
+ let written_bytes: usize = Aes256Gcm::encrypt(
+ &iv,
+ vmpck0,
+ aad,
+ plaintext,
+ msg_payload_addr,
+ MSG_PAYLOAD_SIZE,
+ )?;
+
+ // In the Aes256Gcm encrypt API, the authtag is postfixed in the outbuf
+ let ciphertext_size: usize = written_bytes - AUTHTAG_SIZE;
+ assert_eq!(usize::from(payload_size), ciphertext_size);
+
+ // Move the authtag to the message header
+ let tag: VirtAddr = msg_payload_addr + ciphertext_size;
+ msg_hdr.set_authtag(tag);
+ unsafe { write_bytes(tag.as_mut_ptr::<u8>(), 0_u8, AUTHTAG_SIZE) };
+
+ self.set_hdr(&msg_hdr);
+
+ Ok(())
+ }
+
+ /// Unwrap the SNP_GUEST_REQUEST message, which is encrypted using AES-256 GCM, and save it
+ /// the provided buffer.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Expected type of the message
+ /// * `msg_seqno`: Expected sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to decrypt the message payload
+ /// * `outbuf`: [VirtAddr] of the buffer where the unwrapped message will be saved (if there is enough space)
+ /// * `outbuf_size`: Size in bytes of `outbuf`
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * usize: Number of bytes written to `outbuf`
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn unwrap(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ outbuf: VirtAddr,
+ outbuf_size: usize,
+ ) -> Result<usize, SvsmReqError> {
+ let msg_hdr = self.get_hdr_copy();
+ msg_hdr.validate(msg_type, &msg_seqno)?;
+
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+
+ let ciphertext_len = usize::from(msg_hdr.msg_sz);
+
+ // In the Aes256Gcm decrypt API, the authtag must be provided postfix in the inbuf
+ let inbuf_len: usize = ciphertext_len + AUTHTAG_SIZE;
+
+ // Both the ciphertext and the authenticated tag must fit in the message payload
+ if inbuf_len > MSG_PAYLOAD_SIZE {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ let inbuf = unsafe { from_raw_parts_mut(self.payload_offset(), inbuf_len) };
+ inbuf[ciphertext_len..].copy_from_slice(&msg_hdr.authtag[..AUTHTAG_SIZE]);
+
+ let written_bytes: usize =
+ Aes256Gcm::decrypt(&iv, vmpck0, aad, inbuf, outbuf, outbuf_size)?; | inbuf is used directly from shared memory instead of from a copy in encrypted memory. Do we know if this decrypt operation is strong against iago attacks, such as if inbuf is read more than once during decryption, and its contents are expected to be constant throughout? |
svsm | github_2023 | others | 69 | coconut-svsm | deeglaze | @@ -0,0 +1,348 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` driver
+
+extern crate alloc;
+
+use crate::address::VirtAddr;
+use crate::cpu::percpu::this_cpu_mut;
+use crate::error::SvsmError;
+use crate::greq::msg::{SnpGuestRequestExtData, SnpGuestRequestMsg};
+use crate::protocols::errors::{SvsmReqError, SvsmResultCode};
+use crate::sev::ghcb::GhcbError;
+use crate::sev::secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE};
+use crate::BIT;
+
+// Hypervisor error codes
+
+/// Buffer provided is too small
+pub const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+/// Hypervisor busy, try again
+pub const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+#[derive(Debug, PartialEq)]
+enum DriverState {
+ Initial = 0,
+ Ready = 1,
+ Failed = 2,
+}
+
+/// `SNP_GUEST_REQUEST` driver
+#[derive(Debug)]
+pub struct SnpGuestRequestDriver {
+ /// `SNP_GUEST_REQUEST` message that carries the request
+ request: SnpGuestRequestMsg,
+ /// `SNP_GUEST_REQUEST` message that carries the response
+ response: SnpGuestRequestMsg,
+ /// `SNP_GUEST_REQUEST` extended data, usually used to carry
+ /// hypervisor-provided data (e.g. attestation report certificates)
+ ext_data: SnpGuestRequestExtData,
+ /// Each `SNP_GUEST_REQUEST` message contains a sequence number per VMPCK.
+ /// It is incremented with each message sent. Messages sent by the guest to
+ /// the PSP and by the PSP to the guest must be delivered in order. If not,
+ /// the PSP will reject subsequent messages by the guest when it detects that
+ /// the sequence numbers are out of sync.
+ ///
+ /// NOTE: If the vmpl field of a `SNP_GUEST_REQUEST` message is set to VMPL0,
+ /// then it must contain the VMPL0 sequence number and be protected (encrypted)
+ /// with the VMPCK0 key; additionally, if this message fails, the VMPCK0 key
+ /// must be disabled. The same idea applies to the other VMPL levels.
+ ///
+ /// The guest kernel runs in VMPL1 and it should be able to protect and send
+ /// VMPL1 `SNP_GUEST_REQUEST` messages directly to the PSP; hence,
+ /// the SVSM needs to support only VMPL0 `SNP_GUEST_REQUEST` messages.
+ /// In other words, it needs to maintain the sequence number only for VMPL0.
+ vmpck0_seqno: u64,
+ /// Driver current state. `SNP_GUEST_REQUEST` messages can be sent to the PSP
+ /// only if the driver is in the ready state.
+ state: DriverState,
+}
+
+impl SnpGuestRequestDriver {
+ /// Prepare for a new extended guest request
+ pub fn extended_request_prepare(&mut self, len: usize) -> Result<(), SvsmReqError> {
+ self.ext_data.set_len(len)?;
+ self.ext_data.clear();
+
+ Ok(())
+ }
+
+ /// Copy the first n bytes from the extended data to the provided buffer
+ pub fn extended_data_ncopy_to(
+ &self,
+ n: usize,
+ outbuf: VirtAddr,
+ outbuf_size: usize,
+ ) -> Result<(), SvsmReqError> {
+ self.ext_data.ncopy_to(n, outbuf, outbuf_size)
+ }
+
+ /// Check if the extended data is zeroed
+ pub fn is_extended_data_clear(&self) -> bool {
+ self.ext_data.is_clear()
+ }
+
+ /// The global driver instance GREQ_DRIVER requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like `#[derive(const_Default)]`
+ pub const fn default() -> Self {
+ Self {
+ request: SnpGuestRequestMsg::default(),
+ response: SnpGuestRequestMsg::default(),
+ ext_data: SnpGuestRequestExtData::default(),
+ vmpck0_seqno: 0,
+ state: DriverState::Initial,
+ }
+ }
+
+ fn do_init(&mut self) -> Result<(), SvsmReqError> {
+ self.request.init()?;
+ self.response.init()?;
+ self.ext_data.init()?;
+
+ Ok(())
+ }
+
+ fn is_ready(&self) -> bool {
+ self.state == DriverState::Ready
+ }
+
+ /// Initialize the `SNP_GUEST_REQUEST` driver
+ pub fn init(&mut self) -> Result<(), SvsmReqError> {
+ if self.state != DriverState::Initial {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ let result = self.do_init();
+ if result.is_ok() {
+ self.state = DriverState::Ready;
+ } else {
+ self.free();
+ self.state = DriverState::Failed;
+ // Without the driver we can't send `SNP_GUEST_REQUEST` messages to the PSP.
+ // Clear the VMPCK0 to prevent it from being exploited.
+ disable_vmpck0();
+ }
+
+ result
+ }
+
+ /// Free the memory allocated for the `SNP_GUEST_REQUEST` driver
+ pub fn free(&mut self) {
+ self.request.free();
+ self.response.free();
+ self.ext_data.free();
+ }
+
+ /// Get the last VMPCK0 sequence number accounted
+ fn seqno_last_used(&self) -> u64 {
+ self.vmpck0_seqno
+ }
+
+ /// Increase the VMPCK0 sequence number by two. In order to keep the
+ /// sequence number in-sync with the PSP, this is called only when the
+ /// `SNP_GUEST_REQUEST` response is received.
+ fn seqno_add_two(&mut self) {
+ self.vmpck0_seqno += 2;
+ }
+
+ /// Call the GHCB layer to send the encrypted SNP_GUEST_REQUEST message
+ /// to the PSP.
+ fn send(&mut self, extended: bool) -> Result<(), SvsmReqError> {
+ self.response.clear();
+
+ if extended {
+ this_cpu_mut().ghcb().guest_ext_request(
+ self.request.as_va(),
+ self.response.as_va(),
+ self.ext_data.as_va(),
+ self.ext_data.npages(),
+ )?;
+ } else {
+ this_cpu_mut()
+ .ghcb()
+ .guest_request(self.request.as_va(), self.response.as_va())?;
+ }
+
+ self.seqno_add_two();
+
+ Ok(())
+ }
+
+ /// Send a VMPL0 `SNP_GUEST_REQUEST` command to the PSP.
+ /// The command will be encrypted using AES-256 GCM.
+ ///
+ /// # Parameters:
+ ///
+ /// * `msg_type`: type of the command stored in `buffer`
+ /// * `extended`: whether or not this is an extended `SNP_GUEST_REQUEST` command
+ /// * `buffer`: [VirtAddr] of the buffer that contains the `SNP_GUEST_REQUEST` command
+ /// The same buffer will also be used to store the response.
+ /// * `buffer_size`: Total size of `buffer` in bytes
+ /// * `buffer_len`: Number of bytes from `buffer` being used for the command
+ ///
+ /// # Return codes:
+ ///
+ /// * Success:
+ /// * `usize`: Number of bytes from `buffer` being used for the response
+ /// * Error:
+ /// * `SvsmReqError`
+ pub fn send_request(
+ &mut self,
+ msg_type: u8,
+ extended: bool,
+ buffer: VirtAddr,
+ buffer_size: usize,
+ buffer_len: u16,
+ ) -> Result<usize, SvsmReqError> {
+ if !self.is_ready() || is_vmpck0_clear() {
+ return Err(SvsmReqError::invalid_request());
+ }
+ // Message sequence number overflow, the driver will not able
+ // to send `SNP_GUEST_REQUEST` messages to the PSP. The sequence number is
+ // restored only when the guest is rebooted.
+ // Let's clear the VMPCK0 to prevent it from being exploited.
+ let Some(msg_seqno) = self.seqno_last_used().checked_add(1) else {
+ log::error!("SNP_GUEST_REQUEST: sequence number overflow");
+ disable_vmpck0();
+ return Err(SvsmReqError::invalid_request());
+ };
+
+ // VMPL0 `SNP_GUEST_REQUEST` commands are encrypted with the VMPCK0
+ let vmpck0: [u8; VMPCK_SIZE] = get_vmpck0();
+
+ self.request
+ .load(msg_type, msg_seqno, &vmpck0, buffer, buffer_len)?;
+
+ if let Err(e) = self.send(extended) {
+ if let SvsmReqError::FatalError(SvsmError::Ghcb(GhcbError::VmgexitError(_rbx, info2))) =
+ e
+ {
+ // For some reason the hypervisor did not forward the request to the PSP.
+ // Let's resend it to prevent the IV from being exploited.
+ match info2 & 0xffff_ffff_0000_0000u64 {
+ // The certificate buffer provided is too small.
+ SNP_GUEST_REQ_INVALID_LEN => {
+ if extended { | Do we expect `extended` to be false when the host sends back this value? You still need to complete the PSP request for the sequence numbers to stay in sync, so why keep this guarded? |
svsm | github_2023 | others | 69 | coconut-svsm | deeglaze | @@ -0,0 +1,539 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` message that carries the actual command in the payload
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE};
+use crate::mm::alloc::{allocate_page, allocate_pages, free_page, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+
+/// AEAD algorithm: Invalid
+pub const SNP_AEAD_INVALID: u8 = 0;
+/// AEAD algorithm: AES-256 GCM
+pub const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+// Message Type Encodings (AMD SEV-SNP spec. table 100)
+
+/// Message type: Invalid
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// Message type: Report request
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// Message type: Report response
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+// A guest request message size is one PAGE_SIZE: header + payload
+
+/// Message header size
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = 4000;
+
+/// Maximum buffer size the hypervisor considers to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestMsg {
+ /// Buffer used to store the actual `SNP_GUEST_REQUEST` message
+ /// header and payload, as described in the struct below.
+ /// The payload format differs for each messsage type.
+ /// ```text
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// header: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// };
+ /// ```
+ pub buffer: VirtAddr,
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new header and initialize it
+ pub fn new(msg_sz: u16, msg_type: u8, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SNP_AEAD_AES_256_GCM,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the header authenticated tag
+ fn set_authtag(&mut self, tag: VirtAddr) {
+ unsafe { copy_nonoverlapping(tag.as_ptr(), self.authtag.as_mut_ptr(), AUTHTAG_SIZE) };
+ }
+
+ /// Validate the header
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let header_size = u16::try_from(MSG_HDR_SIZE).unwrap();
+
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SNP_AEAD_AES_256_GCM
+ || self.msg_type != msg_type
+ || self.msg_vmpck != 0
+ || self.msg_seqno != *msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used for additional authenticated data
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ // Ensure the allocated buffer memory will not leak if this type is
+ // instantiated outside of the `SNP_GUEST_REQUEST` static driver context
+ self.free();
+ }
+}
+
+impl SnpGuestRequestMsg {
+ /// The global driver instance (GREQ_DRIVER) requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ /// Allocate the `SNP_GUEST_REQUEST` message (one [PAGE_SIZE]) and
+ /// share it with the hypervisor
+ pub fn init(&mut self) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ Ok(())
+ }
+
+ /// Free the memory allocated for the `SNP_GUEST_REQUEST` buffer
+ pub fn free(&mut self) {
+ if !self.buffer.is_null() {
+ if this_cpu_mut()
+ .get_pgtable()
+ .set_encrypted_4k(self.buffer)
+ .is_err()
+ {
+ log::error!("Failed to set private the SNP_GUEST_REQUEST buffer");
+ }
+ free_page(self.buffer);
+ self.buffer = VirtAddr::null();
+ }
+ }
+
+ /// Clear the `SNP_GUEST_REQUEST` message
+ pub fn clear(&mut self) {
+ unsafe {
+ write_bytes(self.buffer.as_mut_ptr::<u8>(), 0u8, PAGE_SIZE);
+ }
+ }
+
+ /// Get the [VirtAddr] of the `SNP_GUEST_REQUEST` message
+ pub fn as_va(&self) -> VirtAddr {
+ self.buffer
+ }
+
+ /// Get a copy of the `SNP_GUEST_REQUEST` message header
+ pub fn get_hdr_copy(&self) -> SnpGuestRequestMsgHdr {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::default();
+ unsafe {
+ copy_nonoverlapping(
+ self.buffer.as_ptr::<u8>(),
+ ptr::addr_of_mut!(msg_hdr) as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+
+ msg_hdr
+ }
+
+ /// Set the `SNP_GUEST_REQUEST` message header
+ fn set_hdr(&mut self, msg_hdr: &SnpGuestRequestMsgHdr) {
+ unsafe {
+ copy_nonoverlapping(
+ msg_hdr as *const _ as *const u8,
+ self.buffer.as_mut_ptr::<u8>(),
+ MSG_HDR_SIZE,
+ );
+ }
+ }
+
+ /// Get the `SNP_GUEST_REQUEST` message payload address
+ pub fn payload_offset(&mut self) -> *mut u8 {
+ unsafe { self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE) }
+ }
+
+ /// Load a SNP_GUEST_REQUEST message to be sent to the PSP. The message
+ /// will be encrypted using AES-256 GCM.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Type of message to be created
+ /// * `msg_seqno`: Sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to encrypt the `payload`
+ /// * `payload`: [VirtAddr] of the message payload, which will be encrypted
+ /// * `payload_len`: Size of the `payload` in bytes
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * ()
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn load(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ payload: VirtAddr,
+ payload_size: u16,
+ ) -> Result<(), SvsmReqError> {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new(payload_size, msg_type, msg_seqno);
+
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+
+ let plaintext_size = usize::from(payload_size);
+ let plaintext = unsafe { from_raw_parts(payload.as_ptr::<u8>(), plaintext_size) }; | Similar comment to below about decrypt: is this encryption strong against iago attacks, or can you add the double-buffering mitigation I added to Linux's sev-guest so that all encryption is done in encrypted memory before getting written to shared memory? |
svsm | github_2023 | others | 69 | coconut-svsm | joergroedel | @@ -0,0 +1,566 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` message that carries the actual command in the payload
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE};
+use crate::mm::alloc::{allocate_page, allocate_pages, free_page, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+
+/// AEAD algorithm: Invalid
+pub const SNP_AEAD_INVALID: u8 = 0;
+/// AEAD algorithm: AES-256 GCM
+pub const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+// Message Type Encodings (AMD SEV-SNP spec. table 100)
+
+/// Message type: Invalid
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// Message type: Report request
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// Message type: Report response
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+// A guest request message size is one PAGE_SIZE: header + payload
+
+/// Message header size
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = 4000;
+
+/// Maximum buffer size the hypervisor considers to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestMsg {
+ /// Buffer used to store the actual `SNP_GUEST_REQUEST` message
+ /// header and payload, as described in the struct below.
+ /// The payload format differs for each messsage type.
+ /// ```text
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// header: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// };
+ /// ```
+ pub buffer: VirtAddr,
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new header and initialize it
+ pub fn new(msg_sz: u16, msg_type: u8, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SNP_AEAD_AES_256_GCM,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the header authenticated tag
+ fn set_authtag(&mut self, tag: VirtAddr) {
+ unsafe { copy_nonoverlapping(tag.as_ptr(), self.authtag.as_mut_ptr(), AUTHTAG_SIZE) };
+ }
+
+ /// Validate the header
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let header_size = u16::try_from(MSG_HDR_SIZE).unwrap();
+
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SNP_AEAD_AES_256_GCM
+ || self.msg_type != msg_type
+ || self.msg_vmpck != 0
+ || self.msg_seqno != *msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used for additional authenticated data
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ // Ensure the allocated buffer memory will not leak if this type is
+ // instantiated outside of the `SNP_GUEST_REQUEST` static driver context
+ self.free();
+ }
+}
+
+#[derive(PartialEq)]
+pub enum MemoryPage {
+ Unencrypted,
+ Encrypted,
+}
+
+impl SnpGuestRequestMsg {
+ /// The global driver instance (GREQ_DRIVER) requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ /// Allocate the `SNP_GUEST_REQUEST` message (one [PAGE_SIZE]) and
+ /// define whether or not its memory is encrypted
+ pub fn init(&mut self, protection: MemoryPage) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ if protection == MemoryPage::Unencrypted {
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ }
+ Ok(())
+ }
+
+ /// Free the memory allocated for the `SNP_GUEST_REQUEST` buffer
+ pub fn free(&mut self) {
+ if !self.buffer.is_null() {
+ if this_cpu_mut()
+ .get_pgtable()
+ .set_encrypted_4k(self.buffer)
+ .is_err()
+ {
+ log::error!("Failed to set private the SNP_GUEST_REQUEST buffer");
+ }
+ free_page(self.buffer);
+ self.buffer = VirtAddr::null();
+ }
+ }
+
+ /// Clear the `SNP_GUEST_REQUEST` message
+ pub fn clear(&mut self) {
+ unsafe {
+ write_bytes(self.buffer.as_mut_ptr::<u8>(), 0u8, PAGE_SIZE);
+ }
+ }
+
+ /// Get the [VirtAddr] of the `SNP_GUEST_REQUEST` message
+ pub fn as_va(&self) -> VirtAddr {
+ self.buffer
+ }
+
+ /// Get a copy of the `SNP_GUEST_REQUEST` message header
+ pub fn get_hdr_copy(&self) -> SnpGuestRequestMsgHdr {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::default();
+ unsafe {
+ copy_nonoverlapping(
+ self.buffer.as_ptr::<u8>(),
+ ptr::addr_of_mut!(msg_hdr) as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+
+ msg_hdr
+ }
+
+ /// Set the `SNP_GUEST_REQUEST` message header
+ fn set_hdr(&mut self, msg_hdr: &SnpGuestRequestMsgHdr) {
+ unsafe {
+ copy_nonoverlapping(
+ msg_hdr as *const _ as *const u8,
+ self.buffer.as_mut_ptr::<u8>(),
+ MSG_HDR_SIZE,
+ );
+ }
+ }
+
+ /// Get the `SNP_GUEST_REQUEST` message payload address
+ pub fn payload_offset(&mut self) -> *mut u8 {
+ unsafe { self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE) }
+ }
+
+ pub fn copy_from(&mut self, msg: &SnpGuestRequestMsg) -> Result<(), SvsmReqError> {
+ if self.buffer.is_null() || msg.buffer.is_null() {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ unsafe {
+ copy_nonoverlapping(
+ msg.buffer.as_ptr::<u8>(),
+ self.buffer.as_mut_ptr::<u8>(),
+ PAGE_SIZE,
+ );
+ }
+
+ Ok(())
+ }
+
+ /// Load a SNP_GUEST_REQUEST message to be sent to the PSP. The message
+ /// will be encrypted using AES-256 GCM.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Type of message to be created
+ /// * `msg_seqno`: Sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to encrypt the `payload`
+ /// * `payload`: [VirtAddr] of the message payload, which will be encrypted
+ /// * `payload_len`: Size of the `payload` in bytes
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * ()
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn load(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ payload: VirtAddr,
+ payload_size: u16,
+ ) -> Result<(), SvsmReqError> {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new(payload_size, msg_type, msg_seqno);
+
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+
+ let plaintext_size = usize::from(payload_size);
+ let plaintext = unsafe { from_raw_parts(payload.as_ptr::<u8>(), plaintext_size) };
+
+ let msg_payload_addr = self.buffer + MSG_HDR_SIZE;
+
+ // Encrypt the provided payload store and store it in the message
+ let written_bytes: usize = Aes256Gcm::encrypt(
+ &iv,
+ vmpck0,
+ aad,
+ plaintext,
+ msg_payload_addr,
+ MSG_PAYLOAD_SIZE,
+ )?;
+
+ // In the Aes256Gcm encrypt API, the authtag is postfixed in the outbuf
+ let ciphertext_size: usize = written_bytes - AUTHTAG_SIZE;
+ assert_eq!(usize::from(payload_size), ciphertext_size);
+
+ // Move the authtag to the message header
+ let tag: VirtAddr = msg_payload_addr + ciphertext_size;
+ msg_hdr.set_authtag(tag);
+ unsafe { write_bytes(tag.as_mut_ptr::<u8>(), 0_u8, AUTHTAG_SIZE) };
+
+ self.set_hdr(&msg_hdr);
+
+ Ok(())
+ }
+
+ /// Unwrap the SNP_GUEST_REQUEST message, which is encrypted using AES-256 GCM, and save it
+ /// the provided buffer.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Expected type of the message
+ /// * `msg_seqno`: Expected sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to decrypt the message payload
+ /// * `outbuf`: [VirtAddr] of the buffer where the unwrapped message will be saved (if there is enough space)
+ /// * `outbuf_size`: Size in bytes of `outbuf`
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * usize: Number of bytes written to `outbuf`
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn unwrap(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ outbuf: VirtAddr,
+ outbuf_size: usize,
+ ) -> Result<usize, SvsmReqError> {
+ let msg_hdr = self.get_hdr_copy();
+ msg_hdr.validate(msg_type, &msg_seqno)?;
+
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+
+ let ciphertext_len = usize::from(msg_hdr.msg_sz);
+
+ // In the Aes256Gcm decrypt API, the authtag must be provided postfix in the inbuf
+ let inbuf_len: usize = ciphertext_len + AUTHTAG_SIZE;
+
+ // Both the ciphertext and the authenticated tag must fit in the message payload
+ if inbuf_len > MSG_PAYLOAD_SIZE {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ let inbuf = unsafe { from_raw_parts_mut(self.payload_offset(), inbuf_len) };
+ inbuf[ciphertext_len..].copy_from_slice(&msg_hdr.authtag[..AUTHTAG_SIZE]);
+
+ let written_bytes: usize =
+ Aes256Gcm::decrypt(&iv, vmpck0, aad, inbuf, outbuf, outbuf_size)?;
+
+ Ok(written_bytes)
+ }
+}
+
+/// Build the initialization vector for AES-256 GCM
+fn build_iv(msg_seqno: u64) -> [u8; IV_SIZE] {
+ const U64_SIZE: usize = core::mem::size_of::<u64>();
+ let mut iv = [0u8; IV_SIZE];
+
+ iv[..U64_SIZE].copy_from_slice(&msg_seqno.to_ne_bytes());
+ iv
+}
+
+/// `SNP_GUEST_REQUEST` extended data format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestExtData {
+ /// Buffer used to store the `SNP_GUEST_REQUEST` extended data
+ buffer: VirtAddr,
+ /// Number of bytes from `buffer` that are in-use
+ len: usize,
+}
+
+impl Drop for SnpGuestRequestExtData {
+ fn drop(&mut self) {
+ // Ensure the allocated buffer memory will not leak if this type is
+ // instantiated outside of the `SNP_GUEST_REQUEST` static driver context
+ self.free();
+ }
+}
+
+impl SnpGuestRequestExtData {
+ /// The global driver instance GREQ_DRIVER requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ len: 0,
+ }
+ }
+
+ /// Allocate the `SNP_GUEST_REQUEST` extended data and define whether or not
+ /// its memory is encrypted
+ pub fn init(&mut self, protection: MemoryPage) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_pages(get_order(SNP_GUEST_REQ_MAX_DATA_SIZE))?;
+ assert!(self.buffer.is_page_aligned());
+ self.len = SNP_GUEST_REQ_MAX_DATA_SIZE;
+
+ if protection == MemoryPage::Unencrypted {
+ self.set_pages_shared()?;
+ }
+
+ Ok(())
+ }
+
+ /// Free the memory allocated for the extended `SNP_GUEST_REQUEST`
+ pub fn free(&mut self) {
+ if !self.buffer.is_null() {
+ if self.set_pages_encrypted().is_err() {
+ log::error!("Failed to set private the extended SNP_GUEST_REQUEST buffer");
+ }
+ self.free_pages(); | The pages can only be freed when they are set to encrypted, otherwise it is better to leak the memory. Possibly state the leak in the error message. |
svsm | github_2023 | others | 69 | coconut-svsm | joergroedel | @@ -0,0 +1,566 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` message that carries the actual command in the payload
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE};
+use crate::mm::alloc::{allocate_page, allocate_pages, free_page, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+
+/// AEAD algorithm: Invalid
+pub const SNP_AEAD_INVALID: u8 = 0;
+/// AEAD algorithm: AES-256 GCM
+pub const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+// Message Type Encodings (AMD SEV-SNP spec. table 100)
+
+/// Message type: Invalid
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// Message type: Report request
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// Message type: Report response
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+// A guest request message size is one PAGE_SIZE: header + payload
+
+/// Message header size
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = 4000;
+
+/// Maximum buffer size the hypervisor considers to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestMsg {
+ /// Buffer used to store the actual `SNP_GUEST_REQUEST` message
+ /// header and payload, as described in the struct below.
+ /// The payload format differs for each messsage type.
+ /// ```text
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// header: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// };
+ /// ```
+ pub buffer: VirtAddr,
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new header and initialize it
+ pub fn new(msg_sz: u16, msg_type: u8, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SNP_AEAD_AES_256_GCM,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the header authenticated tag
+ fn set_authtag(&mut self, tag: VirtAddr) {
+ unsafe { copy_nonoverlapping(tag.as_ptr(), self.authtag.as_mut_ptr(), AUTHTAG_SIZE) };
+ }
+
+ /// Validate the header
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let header_size = u16::try_from(MSG_HDR_SIZE).unwrap();
+
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SNP_AEAD_AES_256_GCM
+ || self.msg_type != msg_type
+ || self.msg_vmpck != 0
+ || self.msg_seqno != *msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used for additional authenticated data
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ // Ensure the allocated buffer memory will not leak if this type is
+ // instantiated outside of the `SNP_GUEST_REQUEST` static driver context
+ self.free();
+ }
+}
+
+#[derive(PartialEq)]
+pub enum MemoryPage {
+ Unencrypted,
+ Encrypted,
+}
+
+impl SnpGuestRequestMsg {
+ /// The global driver instance (GREQ_DRIVER) requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ /// Allocate the `SNP_GUEST_REQUEST` message (one [PAGE_SIZE]) and
+ /// define whether or not its memory is encrypted
+ pub fn init(&mut self, protection: MemoryPage) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ if protection == MemoryPage::Unencrypted {
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ }
+ Ok(())
+ }
+
+ /// Free the memory allocated for the `SNP_GUEST_REQUEST` buffer
+ pub fn free(&mut self) {
+ if !self.buffer.is_null() {
+ if this_cpu_mut()
+ .get_pgtable()
+ .set_encrypted_4k(self.buffer)
+ .is_err()
+ {
+ log::error!("Failed to set private the SNP_GUEST_REQUEST buffer");
+ }
+ free_page(self.buffer); | The page can only be freed if it was set back to encrypted state. We can not leak a shared page back into the allocator. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,566 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` message that carries the actual command in the payload
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE};
+use crate::mm::alloc::{allocate_page, allocate_pages, free_page, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+
+/// AEAD algorithm: Invalid
+pub const SNP_AEAD_INVALID: u8 = 0;
+/// AEAD algorithm: AES-256 GCM
+pub const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+// Message Type Encodings (AMD SEV-SNP spec. table 100)
+
+/// Message type: Invalid
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// Message type: Report request
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// Message type: Report response
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+// A guest request message size is one PAGE_SIZE: header + payload
+
+/// Message header size
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = 4000;
+
+/// Maximum buffer size the hypervisor considers to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestMsg {
+ /// Buffer used to store the actual `SNP_GUEST_REQUEST` message
+ /// header and payload, as described in the struct below.
+ /// The payload format differs for each messsage type.
+ /// ```text
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// header: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// };
+ /// ```
+ pub buffer: VirtAddr,
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new header and initialize it
+ pub fn new(msg_sz: u16, msg_type: u8, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SNP_AEAD_AES_256_GCM,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the header authenticated tag
+ fn set_authtag(&mut self, tag: VirtAddr) {
+ unsafe { copy_nonoverlapping(tag.as_ptr(), self.authtag.as_mut_ptr(), AUTHTAG_SIZE) };
+ }
+
+ /// Validate the header
+ pub fn validate(&self, msg_type: u8, msg_seqno: &u64) -> Result<(), SvsmReqError> {
+ let header_size = u16::try_from(MSG_HDR_SIZE).unwrap();
+
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SNP_AEAD_AES_256_GCM
+ || self.msg_type != msg_type
+ || self.msg_vmpck != 0
+ || self.msg_seqno != *msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used for additional authenticated data
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let msg_base = self as *const _ as *const u8;
+ let msg_algo = &self.algo as *const u8;
+
+ let algo_offset = unsafe { msg_algo.offset_from(msg_base) } as usize;
+
+ unsafe { from_raw_parts(msg_algo, MSG_HDR_SIZE - algo_offset) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ // Ensure the allocated buffer memory will not leak if this type is
+ // instantiated outside of the `SNP_GUEST_REQUEST` static driver context
+ self.free();
+ }
+}
+
+#[derive(PartialEq)]
+pub enum MemoryPage {
+ Unencrypted,
+ Encrypted,
+}
+
+impl SnpGuestRequestMsg {
+ /// The global driver instance (GREQ_DRIVER) requires "pub const fn default()",
+ /// however, const Default trait is not supported (yet?)
+ /// <https://github.com/rust-lang/rust/issues/67792>
+ ///
+ /// Once it is supported we should be able to just annotate the structure
+ /// with something like #[derive(const_Default)]
+ pub const fn default() -> Self {
+ Self {
+ buffer: VirtAddr::null(),
+ }
+ }
+
+ /// Allocate the `SNP_GUEST_REQUEST` message (one [PAGE_SIZE]) and
+ /// define whether or not its memory is encrypted
+ pub fn init(&mut self, protection: MemoryPage) -> Result<(), SvsmReqError> {
+ self.buffer = allocate_page()?;
+ if protection == MemoryPage::Unencrypted {
+ this_cpu_mut().get_pgtable().set_shared_4k(self.buffer)?;
+ }
+ Ok(())
+ }
+
+ /// Free the memory allocated for the `SNP_GUEST_REQUEST` buffer
+ pub fn free(&mut self) {
+ if !self.buffer.is_null() {
+ if this_cpu_mut()
+ .get_pgtable()
+ .set_encrypted_4k(self.buffer)
+ .is_err()
+ {
+ log::error!("Failed to set private the SNP_GUEST_REQUEST buffer");
+ }
+ free_page(self.buffer);
+ self.buffer = VirtAddr::null();
+ }
+ }
+
+ /// Clear the `SNP_GUEST_REQUEST` message
+ pub fn clear(&mut self) {
+ unsafe {
+ write_bytes(self.buffer.as_mut_ptr::<u8>(), 0u8, PAGE_SIZE);
+ }
+ }
+
+ /// Get the [VirtAddr] of the `SNP_GUEST_REQUEST` message
+ pub fn as_va(&self) -> VirtAddr {
+ self.buffer
+ }
+
+ /// Get a copy of the `SNP_GUEST_REQUEST` message header
+ pub fn get_hdr_copy(&self) -> SnpGuestRequestMsgHdr {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::default();
+ unsafe {
+ copy_nonoverlapping(
+ self.buffer.as_ptr::<u8>(),
+ ptr::addr_of_mut!(msg_hdr) as *mut u8,
+ MSG_HDR_SIZE,
+ );
+ }
+
+ msg_hdr
+ }
+
+ /// Set the `SNP_GUEST_REQUEST` message header
+ fn set_hdr(&mut self, msg_hdr: &SnpGuestRequestMsgHdr) {
+ unsafe {
+ copy_nonoverlapping(
+ msg_hdr as *const _ as *const u8,
+ self.buffer.as_mut_ptr::<u8>(),
+ MSG_HDR_SIZE,
+ );
+ }
+ }
+
+ /// Get the `SNP_GUEST_REQUEST` message payload address
+ pub fn payload_offset(&mut self) -> *mut u8 {
+ unsafe { self.buffer.as_mut_ptr::<u8>().add(MSG_HDR_SIZE) }
+ }
+
+ pub fn copy_from(&mut self, msg: &SnpGuestRequestMsg) -> Result<(), SvsmReqError> {
+ if self.buffer.is_null() || msg.buffer.is_null() {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ unsafe {
+ copy_nonoverlapping(
+ msg.buffer.as_ptr::<u8>(),
+ self.buffer.as_mut_ptr::<u8>(),
+ PAGE_SIZE,
+ );
+ }
+
+ Ok(())
+ }
+
+ /// Load a SNP_GUEST_REQUEST message to be sent to the PSP. The message
+ /// will be encrypted using AES-256 GCM.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Type of message to be created
+ /// * `msg_seqno`: Sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to encrypt the `payload`
+ /// * `payload`: [VirtAddr] of the message payload, which will be encrypted
+ /// * `payload_len`: Size of the `payload` in bytes
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * ()
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn load(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ payload: VirtAddr,
+ payload_size: u16,
+ ) -> Result<(), SvsmReqError> {
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new(payload_size, msg_type, msg_seqno);
+
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+
+ let plaintext_size = usize::from(payload_size);
+ let plaintext = unsafe { from_raw_parts(payload.as_ptr::<u8>(), plaintext_size) };
+
+ let msg_payload_addr = self.buffer + MSG_HDR_SIZE;
+
+ // Encrypt the provided payload store and store it in the message
+ let written_bytes: usize = Aes256Gcm::encrypt(
+ &iv,
+ vmpck0,
+ aad,
+ plaintext,
+ msg_payload_addr,
+ MSG_PAYLOAD_SIZE,
+ )?;
+
+ // In the Aes256Gcm encrypt API, the authtag is postfixed in the outbuf
+ let ciphertext_size: usize = written_bytes - AUTHTAG_SIZE;
+ assert_eq!(usize::from(payload_size), ciphertext_size);
+
+ // Move the authtag to the message header
+ let tag: VirtAddr = msg_payload_addr + ciphertext_size;
+ msg_hdr.set_authtag(tag);
+ unsafe { write_bytes(tag.as_mut_ptr::<u8>(), 0_u8, AUTHTAG_SIZE) };
+
+ self.set_hdr(&msg_hdr);
+
+ Ok(())
+ }
+
+ /// Unwrap the SNP_GUEST_REQUEST message, which is encrypted using AES-256 GCM, and save it
+ /// the provided buffer.
+ ///
+ /// # Parameters
+ ///
+ /// * `msg_type`: Expected type of the message
+ /// * `msg_seqno`: Expected sequence number of the message
+ /// * `vmpck0`: VMPCK0 key to decrypt the message payload
+ /// * `outbuf`: [VirtAddr] of the buffer where the unwrapped message will be saved (if there is enough space)
+ /// * `outbuf_size`: Size in bytes of `outbuf`
+ ///
+ /// # Return codes:
+ ///
+ /// * Success
+ /// * usize: Number of bytes written to `outbuf`
+ /// * Error
+ /// * [SvsmReqError]
+ pub fn unwrap(
+ &mut self,
+ msg_type: u8,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ outbuf: VirtAddr,
+ outbuf_size: usize,
+ ) -> Result<usize, SvsmReqError> {
+ let msg_hdr = self.get_hdr_copy();
+ msg_hdr.validate(msg_type, &msg_seqno)?;
+
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+
+ let ciphertext_len = usize::from(msg_hdr.msg_sz);
+
+ // In the Aes256Gcm decrypt API, the authtag must be provided postfix in the inbuf
+ let inbuf_len: usize = ciphertext_len + AUTHTAG_SIZE;
+
+ // Both the ciphertext and the authenticated tag must fit in the message payload
+ if inbuf_len > MSG_PAYLOAD_SIZE {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ let inbuf = unsafe { from_raw_parts_mut(self.payload_offset(), inbuf_len) };
+ inbuf[ciphertext_len..].copy_from_slice(&msg_hdr.authtag[..AUTHTAG_SIZE]);
+
+ let written_bytes: usize =
+ Aes256Gcm::decrypt(&iv, vmpck0, aad, inbuf, outbuf, outbuf_size)?;
+
+ Ok(written_bytes)
+ }
+}
+
+/// Build the initialization vector for AES-256 GCM
+fn build_iv(msg_seqno: u64) -> [u8; IV_SIZE] {
+ const U64_SIZE: usize = core::mem::size_of::<u64>();
+ let mut iv = [0u8; IV_SIZE];
+
+ iv[..U64_SIZE].copy_from_slice(&msg_seqno.to_ne_bytes());
+ iv
+}
+
+/// `SNP_GUEST_REQUEST` extended data format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestExtData { | In it's current state, this data structure doesn't seem sound to me:
1. There's nothing stopping the user from calling `init` multiple times and leaking memory.
2. `set_len` allows the user to freely set any value. We can no longer rely on `len` being accurate. `set_len` should either be made unsafe or removed.
3. `ncopy_to` shouldn't take `outbuf` and `outbuf_size` like that. This is really unidiomatic for Rust code. This function should either be unsafe or not take `outbuf` and `outbuf_size` like that. Maybe a slice could work? This problem also occurs with other functions added in this pr. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,566 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` message that carries the actual command in the payload
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::cpu::percpu::this_cpu_mut;
+use crate::crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE};
+use crate::mm::alloc::{allocate_page, allocate_pages, free_page, get_order};
+use crate::protocols::errors::SvsmReqError;
+use crate::sev::secrets_page::VMPCK_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+
+use core::ptr;
+use core::ptr::{copy_nonoverlapping, write_bytes};
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+
+/// AEAD algorithm: Invalid
+pub const SNP_AEAD_INVALID: u8 = 0;
+/// AEAD algorithm: AES-256 GCM
+pub const SNP_AEAD_AES_256_GCM: u8 = 1;
+
+// Message Type Encodings (AMD SEV-SNP spec. table 100)
+
+/// Message type: Invalid
+pub const SNP_MSG_TYPE_INVALID: u8 = 0;
+/// Message type: Report request
+pub const SNP_MSG_REPORT_REQ: u8 = 5;
+/// Message type: Report response
+pub const SNP_MSG_REPORT_RSP: u8 = 6;
+
+// A guest request message size is one PAGE_SIZE: header + payload
+
+/// Message header size
+const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = 4000;
+
+/// Maximum buffer size the hypervisor considers to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Debug)]
+pub struct SnpGuestRequestMsg {
+ /// Buffer used to store the actual `SNP_GUEST_REQUEST` message
+ /// header and payload, as described in the struct below.
+ /// The payload format differs for each messsage type.
+ /// ```text
+ /// #[repr(C, packed)]
+ /// struct SnpGuestRequestMsg {
+ /// header: SnpGuestRequestMsgHdr,
+ /// payload: [u8; MSG_PAYLOAD_SIZE],
+ /// };
+ /// ```
+ pub buffer: VirtAddr,
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new header and initialize it
+ pub fn new(msg_sz: u16, msg_type: u8, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SNP_AEAD_AES_256_GCM,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the header authenticated tag
+ fn set_authtag(&mut self, tag: VirtAddr) { | This function should take a `tag: &[u8; 32]` (or `&[u8]`). |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! RustCrypto implementation
+
+use aes_gcm::{
+ aead::{Aead, Payload},
+ Aes256Gcm, Key, KeyInit, Nonce,
+};
+use core::ptr::copy_nonoverlapping;
+
+use crate::address::VirtAddr;
+use crate::crypto::aead::{
+ Aes256Gcm as CryptoAes256Gcm, Aes256GcmTrait as CryptoAes256GcmTrait, IV_SIZE, KEY_SIZE,
+};
+use crate::protocols::errors::SvsmReqError;
+
+#[repr(u64)]
+#[derive(PartialEq)]
+enum AesGcmOperation {
+ Encrypt = 0,
+ Decrypt = 1,
+}
+
+fn aes_gcm_do(
+ operation: AesGcmOperation,
+ iv: &[u8; IV_SIZE],
+ key: &[u8; KEY_SIZE],
+ aad: &[u8],
+ inbuf: &[u8],
+ outbuf: VirtAddr,
+ outbuf_size: usize,
+) -> Result<usize, SvsmReqError> {
+ let payload = Payload { msg: inbuf, aad };
+
+ let aes_key = Key::<Aes256Gcm>::from_slice(key);
+ let gcm = Aes256Gcm::new(aes_key);
+ let nonce = Nonce::from_slice(iv);
+
+ let result = if operation == AesGcmOperation::Encrypt {
+ gcm.encrypt(nonce, payload)
+ } else {
+ gcm.decrypt(nonce, payload)
+ };
+
+ let Ok(buffer) = result else {
+ return Err(SvsmReqError::invalid_format());
+ }; | ```suggestion
let buffer = result.map_err(|_| SvsmReqError::invalid_format())?;
``` |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,184 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! API to send `SNP_GUEST_REQUEST` commands to the PSP
+
+extern crate alloc;
+
+use crate::address::{Address, VirtAddr};
+use crate::greq::driver::{SnpGuestRequestClass, SnpGuestRequestDriver};
+use crate::greq::msg::SNP_MSG_REPORT_REQ;
+use crate::greq::pld_report::{SnpReportRequest, SnpReportResponse};
+use crate::locking::{LockGuard, SpinLock};
+use crate::protocols::errors::SvsmReqError;
+
+use log;
+
+/// `SNP_GUEST_REQUEST` driver.
+/// The PSP accepts only one `SNP_GUEST_REQUEST` command at a time
+static GREQ_DRIVER: SpinLock<SnpGuestRequestDriver> =
+ SpinLock::new(SnpGuestRequestDriver::default());
+
+/// Initialize the `SNP_GUEST_REQUEST` driver
+pub fn greq_driver_init() {
+ if let Err(e) = GREQ_DRIVER.lock().init() {
+ log::error!("SNP_GUEST_REQUEST driver failed to initialize, e={:?}", e); | ```suggestion
log::error!("SNP_GUEST_REQUEST driver failed to initialize, e={e:?}");
``` |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! RustCrypto implementation
+
+use aes_gcm::{
+ aead::{Aead, Payload},
+ Aes256Gcm, Key, KeyInit, Nonce,
+};
+use core::ptr::copy_nonoverlapping;
+
+use crate::{
+ crypto::aead::{
+ Aes256Gcm as CryptoAes256Gcm, Aes256GcmTrait as CryptoAes256GcmTrait, IV_SIZE, KEY_SIZE,
+ },
+ protocols::errors::SvsmReqError,
+};
+
+#[repr(u64)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum AesGcmOperation {
+ Encrypt = 0,
+ Decrypt = 1,
+}
+
+fn aes_gcm_do(
+ operation: AesGcmOperation,
+ iv: &[u8; IV_SIZE],
+ key: &[u8; KEY_SIZE],
+ aad: &[u8],
+ inbuf: &[u8],
+ outbuf: &mut [u8],
+) -> Result<usize, SvsmReqError> {
+ let payload = Payload { msg: inbuf, aad };
+
+ let aes_key = Key::<Aes256Gcm>::from_slice(key);
+ let gcm = Aes256Gcm::new(aes_key);
+ let nonce = Nonce::from_slice(iv);
+
+ let result = if operation == AesGcmOperation::Encrypt {
+ gcm.encrypt(nonce, payload)
+ } else {
+ gcm.decrypt(nonce, payload)
+ };
+
+ let buffer = result.map_err(|_| SvsmReqError::invalid_format())?;
+
+ // Buffer overflow
+ if buffer.len() > outbuf.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ unsafe { copy_nonoverlapping(buffer.as_ptr(), outbuf.as_mut_ptr(), buffer.len()) }; | ```suggestion
let outbuf = outbuf
.get_mut(..buffer.len())
.ok_or_else(SvsmReqError::invalid_parameter)?;
outbuf.copy_from_slice(&buffer);
``` |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,523 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Driver to send `SNP_GUEST_REQUEST` commands to the PSP. It can be any of the
+//! request or response command types defined in the SEV-SNP spec, regardless if it's
+//! a regular or an extended command.
+
+extern crate alloc;
+
+use crate::{
+ address::VirtAddr,
+ cpu::percpu::this_cpu_mut,
+ error::SvsmError,
+ greq::msg::{
+ SnpGuestRequestExtData, SnpGuestRequestMsg, SnpGuestRequestMsgType,
+ SNP_GUEST_REQ_MAX_DATA_SIZE,
+ },
+ locking::SpinLock,
+ mm::alloc::{allocate_page, allocate_pages, free_page, get_order},
+ protocols::errors::{SvsmReqError, SvsmResultCode},
+ sev::{
+ ghcb::GhcbError,
+ secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE},
+ },
+ types::{PAGE_SHIFT, PAGE_SIZE},
+ BIT,
+};
+use core::{cell::OnceCell, mem::size_of, slice::from_raw_parts_mut};
+
+/// Global `SNP_GUEST_REQUEST` driver instance
+static GREQ_DRIVER: SpinLock<OnceCell<SnpGuestRequestDriver>> = SpinLock::new(OnceCell::new());
+
+// Hypervisor error codes
+
+/// Buffer provided is too small
+const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+/// Hypervisor busy, try again
+const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+/// Class of the `SNP_GUEST_REQUEST` command: Regular or Extended
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+enum SnpGuestRequestClass {
+ Regular = 0,
+ Extended = 1,
+}
+
+/// `SNP_GUEST_REQUEST` driver
+#[derive(Debug, Default)]
+struct SnpGuestRequestDriver<'a> {
+ /// Shared page used for the `SNP_GUEST_REQUEST` request
+ request: Option<&'a mut SnpGuestRequestMsg>,
+ /// Shared page used for the `SNP_GUEST_REQUEST` response
+ response: Option<&'a mut SnpGuestRequestMsg>,
+ /// Protected page where we encrypt/decrypt the request/response messages
+ staging: Option<&'a mut SnpGuestRequestMsg>,
+ /// Data of an extended `SNP_GUEST_REQUEST` command used to carry
+ /// the SEV-SNP certificates
+ ext_data: Option<&'a mut SnpGuestRequestExtData>,
+ /// `certs` buffer size provided by the user in [`get_extended_report()`].
+ /// The driver provides a buffer of the same size to the hypervisor.
+ user_extdata_size: usize,
+ /// Each `SNP_GUEST_REQUEST` message contains a sequence number per VMPCK.
+ /// The sequence number is incremented with each message sent. Messages
+ /// sent by the guest to the PSP and by the PSP to the guest must be
+ /// delivered in order. If not, the PSP will reject subsequent messages
+ /// by the guest when it detects that the sequence numbers are out of sync.
+ ///
+ /// NOTE: If the vmpl field of a `SNP_GUEST_REQUEST` message is set to VMPL0,
+ /// then it must contain the VMPL0 sequence number and be protected (encrypted)
+ /// with the VMPCK0 key; additionally, if this message fails, the VMPCK0 key
+ /// must be disabled. The same idea applies to the other VMPL levels.
+ ///
+ /// The SVSM needs to support only VMPL0 `SNP_GUEST_REQUEST` commands because
+ /// other layers in the software stack (e.g. OVMF and guest kernel) can send
+ /// non-VMPL0 commands directly to PSP. Therefore, the SVSM needs to maintain
+ /// the sequence number and the VMPCK only for VMPL0.
+ vmpck0_seqno: u64,
+}
+
+fn set_pages_4k_encrypted(vaddr: VirtAddr, size: usize) -> Result<(), SvsmReqError> {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_encrypted_4k(vpage)?;
+ }
+ Ok(())
+}
+
+fn set_pages_4k_shared(vaddr: VirtAddr, size: usize) -> Result<(), SvsmReqError> {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_shared_4k(vpage)?;
+ }
+ Ok(())
+}
+
+fn free_pages_4k(vaddr: VirtAddr, size: usize) {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ free_page(vpage);
+ }
+} | This code probably belongs somewhere in the mm module, this doesn't seem specific to snp guest requests.
These functions should be unsafe. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,523 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Driver to send `SNP_GUEST_REQUEST` commands to the PSP. It can be any of the
+//! request or response command types defined in the SEV-SNP spec, regardless if it's
+//! a regular or an extended command.
+
+extern crate alloc;
+
+use crate::{
+ address::VirtAddr,
+ cpu::percpu::this_cpu_mut,
+ error::SvsmError,
+ greq::msg::{
+ SnpGuestRequestExtData, SnpGuestRequestMsg, SnpGuestRequestMsgType,
+ SNP_GUEST_REQ_MAX_DATA_SIZE,
+ },
+ locking::SpinLock,
+ mm::alloc::{allocate_page, allocate_pages, free_page, get_order},
+ protocols::errors::{SvsmReqError, SvsmResultCode},
+ sev::{
+ ghcb::GhcbError,
+ secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE},
+ },
+ types::{PAGE_SHIFT, PAGE_SIZE},
+ BIT,
+};
+use core::{cell::OnceCell, mem::size_of, slice::from_raw_parts_mut};
+
+/// Global `SNP_GUEST_REQUEST` driver instance
+static GREQ_DRIVER: SpinLock<OnceCell<SnpGuestRequestDriver>> = SpinLock::new(OnceCell::new());
+
+// Hypervisor error codes
+
+/// Buffer provided is too small
+const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+/// Hypervisor busy, try again
+const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+/// Class of the `SNP_GUEST_REQUEST` command: Regular or Extended
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+enum SnpGuestRequestClass {
+ Regular = 0,
+ Extended = 1,
+}
+
+/// `SNP_GUEST_REQUEST` driver
+#[derive(Debug, Default)]
+struct SnpGuestRequestDriver<'a> {
+ /// Shared page used for the `SNP_GUEST_REQUEST` request
+ request: Option<&'a mut SnpGuestRequestMsg>,
+ /// Shared page used for the `SNP_GUEST_REQUEST` response
+ response: Option<&'a mut SnpGuestRequestMsg>,
+ /// Protected page where we encrypt/decrypt the request/response messages
+ staging: Option<&'a mut SnpGuestRequestMsg>,
+ /// Data of an extended `SNP_GUEST_REQUEST` command used to carry
+ /// the SEV-SNP certificates
+ ext_data: Option<&'a mut SnpGuestRequestExtData>,
+ /// `certs` buffer size provided by the user in [`get_extended_report()`].
+ /// The driver provides a buffer of the same size to the hypervisor.
+ user_extdata_size: usize,
+ /// Each `SNP_GUEST_REQUEST` message contains a sequence number per VMPCK.
+ /// The sequence number is incremented with each message sent. Messages
+ /// sent by the guest to the PSP and by the PSP to the guest must be
+ /// delivered in order. If not, the PSP will reject subsequent messages
+ /// by the guest when it detects that the sequence numbers are out of sync.
+ ///
+ /// NOTE: If the vmpl field of a `SNP_GUEST_REQUEST` message is set to VMPL0,
+ /// then it must contain the VMPL0 sequence number and be protected (encrypted)
+ /// with the VMPCK0 key; additionally, if this message fails, the VMPCK0 key
+ /// must be disabled. The same idea applies to the other VMPL levels.
+ ///
+ /// The SVSM needs to support only VMPL0 `SNP_GUEST_REQUEST` commands because
+ /// other layers in the software stack (e.g. OVMF and guest kernel) can send
+ /// non-VMPL0 commands directly to PSP. Therefore, the SVSM needs to maintain
+ /// the sequence number and the VMPCK only for VMPL0.
+ vmpck0_seqno: u64,
+}
+
+fn set_pages_4k_encrypted(vaddr: VirtAddr, size: usize) -> Result<(), SvsmReqError> {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_encrypted_4k(vpage)?;
+ }
+ Ok(())
+}
+
+fn set_pages_4k_shared(vaddr: VirtAddr, size: usize) -> Result<(), SvsmReqError> {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_shared_4k(vpage)?;
+ }
+ Ok(())
+}
+
+fn free_pages_4k(vaddr: VirtAddr, size: usize) {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ free_page(vpage);
+ }
+}
+
+impl<'a> Drop for SnpGuestRequestDriver<'a> {
+ /// Drop for [`SnpGuestRequestDriver`]
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsg`] size is not equal to [`PAGE_SIZE`]
+ /// * [`SnpGuestRequestExtData`] size is not equal to [`SNP_GUEST_REQ_MAX_DATA_SIZE`]
+ fn drop(&mut self) {
+ const GREQ_MSG_SIZE: usize = size_of::<SnpGuestRequestMsg>();
+ const GREQ_EXT_DATA_SIZE: usize = size_of::<SnpGuestRequestExtData>();
+
+ assert_eq!(GREQ_MSG_SIZE, PAGE_SIZE);
+ assert_eq!(GREQ_EXT_DATA_SIZE, SNP_GUEST_REQ_MAX_DATA_SIZE);
+
+ // NOTE: Before having a page freed (returning it to the allocator),
+ // we must make sure that it is set to encrypted; otherwise, its better
+ // to leak the page.
+
+ // Free shared page used for the request
+ if let Some(request) = self.request.as_deref_mut() {
+ let vaddr = VirtAddr::from(request as *mut SnpGuestRequestMsg);
+ if this_cpu_mut().get_pgtable().set_encrypted_4k(vaddr).is_ok() {
+ free_page(vaddr);
+ } else {
+ log::warn!("GREQ: request: failed to set page to encrypted. Memory leak!");
+ }
+ }
+ // Free shared page used for the response
+ if let Some(response) = self.response.as_deref_mut() {
+ let vaddr = VirtAddr::from(response as *mut SnpGuestRequestMsg);
+ if this_cpu_mut().get_pgtable().set_encrypted_4k(vaddr).is_ok() {
+ free_page(vaddr);
+ } else {
+ log::warn!("GREQ: response: failed to set page to encrypted. Memory leak!");
+ }
+ }
+ // Free encrypted page used for the staging
+ if let Some(staging) = self.staging.as_deref_mut() {
+ let vaddr = VirtAddr::from(staging as *mut SnpGuestRequestMsg);
+ free_page(vaddr);
+ }
+ // Free shared pages used for the ext_data
+ if let Some(ext_data) = self.ext_data.as_deref_mut() {
+ let vaddr = VirtAddr::from(ext_data as *mut SnpGuestRequestExtData);
+ if set_pages_4k_encrypted(vaddr, GREQ_EXT_DATA_SIZE).is_ok() {
+ free_pages_4k(vaddr, GREQ_EXT_DATA_SIZE);
+ } else {
+ log::warn!("GREQ: ext_data: failed to set pages to encrypted. Memory leak!");
+ }
+ }
+ }
+}
+
+impl<'a> SnpGuestRequestDriver<'a> {
+ /// Get the last VMPCK0 sequence number accounted
+ fn seqno_last_used(&self) -> u64 {
+ self.vmpck0_seqno
+ }
+
+ /// Increase the VMPCK0 sequence number by two. In order to keep the
+ /// sequence number in-sync with the PSP, this is called only when the
+ /// `SNP_GUEST_REQUEST` response is received.
+ fn seqno_add_two(&mut self) {
+ self.vmpck0_seqno += 2;
+ }
+
+ /// Call the GHCB layer to send the encrypted SNP_GUEST_REQUEST message
+ /// to the PSP.
+ fn send(&mut self, req_class: SnpGuestRequestClass) -> Result<(), SvsmReqError> {
+ let request = self
+ .request
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let response = self
+ .response
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let ext_data = self
+ .ext_data
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+
+ response.clear();
+
+ if req_class == SnpGuestRequestClass::Extended {
+ this_cpu_mut().ghcb().guest_ext_request(
+ VirtAddr::from(request as *mut SnpGuestRequestMsg),
+ VirtAddr::from(response as *mut SnpGuestRequestMsg),
+ VirtAddr::from(ext_data as *mut SnpGuestRequestExtData),
+ self.user_extdata_npages(),
+ )?;
+ } else {
+ this_cpu_mut().ghcb().guest_request(
+ VirtAddr::from(request as *mut SnpGuestRequestMsg),
+ VirtAddr::from(response as *mut SnpGuestRequestMsg),
+ )?;
+ }
+
+ self.seqno_add_two();
+
+ Ok(())
+ }
+
+ fn copy_staging_to_request(&mut self) -> Result<(), SvsmReqError> {
+ let staging = self
+ .staging
+ .as_deref()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let request = self
+ .request
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ *request = *staging;
+ Ok(())
+ }
+
+ fn copy_response_to_staging(&mut self) -> Result<(), SvsmReqError> {
+ let response = self
+ .response
+ .as_deref()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let staging = self
+ .staging
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ *staging = *response;
+ Ok(())
+ }
+
+ /// Send the provided VMPL0 `SNP_GUEST_REQUEST` command to the PSP.
+ ///
+ /// The command will be encrypted using AES-256 GCM.
+ ///
+ /// # Arguments
+ ///
+ /// * `req_class`: whether this is a regular or extended `SNP_GUEST_REQUEST` command
+ /// * `msg_type`: type of the command stored in `buffer`, e.g. [`SNP_MSG_REPORT_REQ`]
+ /// * `buffer`: buffer with the `SNP_GUEST_REQUEST` command to be sent.
+ /// The same buffer will also be used to store the response.
+ /// * `command_len`: Size (in bytes) of the command stored in `buffer`
+ ///
+ /// # Returns
+ ///
+ /// * Success:
+ /// * `usize`: Size (in bytes) of the response stored in `buffer`
+ /// * Error:
+ /// * [`SvsmReqError`]
+ fn send_request(
+ &mut self,
+ req_class: SnpGuestRequestClass,
+ msg_type: SnpGuestRequestMsgType,
+ buffer: &mut [u8],
+ command_len: usize,
+ ) -> Result<usize, SvsmReqError> {
+ if is_vmpck0_clear() || self.staging.is_none() {
+ return Err(SvsmReqError::invalid_request());
+ }
+ // Message sequence number overflow, the driver will not able
+ // to send subsequent `SNP_GUEST_REQUEST` messages to the PSP.
+ // The sequence number is restored only when the guest is rebooted.
+ let Some(msg_seqno) = self.seqno_last_used().checked_add(1) else {
+ log::error!("SNP_GUEST_REQUEST: sequence number overflow");
+ disable_vmpck0(); | We call `disable_vmpck0` for a bunch of error cases. Have you considered additionally calling `disable_vmpck0` in the SVSM's panic handler? |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,523 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Driver to send `SNP_GUEST_REQUEST` commands to the PSP. It can be any of the
+//! request or response command types defined in the SEV-SNP spec, regardless if it's
+//! a regular or an extended command.
+
+extern crate alloc;
+
+use crate::{
+ address::VirtAddr,
+ cpu::percpu::this_cpu_mut,
+ error::SvsmError,
+ greq::msg::{
+ SnpGuestRequestExtData, SnpGuestRequestMsg, SnpGuestRequestMsgType,
+ SNP_GUEST_REQ_MAX_DATA_SIZE,
+ },
+ locking::SpinLock,
+ mm::alloc::{allocate_page, allocate_pages, free_page, get_order},
+ protocols::errors::{SvsmReqError, SvsmResultCode},
+ sev::{
+ ghcb::GhcbError,
+ secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE},
+ },
+ types::{PAGE_SHIFT, PAGE_SIZE},
+ BIT,
+};
+use core::{cell::OnceCell, mem::size_of, slice::from_raw_parts_mut};
+
+/// Global `SNP_GUEST_REQUEST` driver instance
+static GREQ_DRIVER: SpinLock<OnceCell<SnpGuestRequestDriver>> = SpinLock::new(OnceCell::new());
+
+// Hypervisor error codes
+
+/// Buffer provided is too small
+const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+/// Hypervisor busy, try again
+const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+/// Class of the `SNP_GUEST_REQUEST` command: Regular or Extended
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+enum SnpGuestRequestClass {
+ Regular = 0,
+ Extended = 1,
+}
+
+/// `SNP_GUEST_REQUEST` driver
+#[derive(Debug, Default)]
+struct SnpGuestRequestDriver<'a> {
+ /// Shared page used for the `SNP_GUEST_REQUEST` request
+ request: Option<&'a mut SnpGuestRequestMsg>,
+ /// Shared page used for the `SNP_GUEST_REQUEST` response
+ response: Option<&'a mut SnpGuestRequestMsg>,
+ /// Protected page where we encrypt/decrypt the request/response messages
+ staging: Option<&'a mut SnpGuestRequestMsg>,
+ /// Data of an extended `SNP_GUEST_REQUEST` command used to carry
+ /// the SEV-SNP certificates
+ ext_data: Option<&'a mut SnpGuestRequestExtData>,
+ /// `certs` buffer size provided by the user in [`get_extended_report()`].
+ /// The driver provides a buffer of the same size to the hypervisor.
+ user_extdata_size: usize,
+ /// Each `SNP_GUEST_REQUEST` message contains a sequence number per VMPCK.
+ /// The sequence number is incremented with each message sent. Messages
+ /// sent by the guest to the PSP and by the PSP to the guest must be
+ /// delivered in order. If not, the PSP will reject subsequent messages
+ /// by the guest when it detects that the sequence numbers are out of sync.
+ ///
+ /// NOTE: If the vmpl field of a `SNP_GUEST_REQUEST` message is set to VMPL0,
+ /// then it must contain the VMPL0 sequence number and be protected (encrypted)
+ /// with the VMPCK0 key; additionally, if this message fails, the VMPCK0 key
+ /// must be disabled. The same idea applies to the other VMPL levels.
+ ///
+ /// The SVSM needs to support only VMPL0 `SNP_GUEST_REQUEST` commands because
+ /// other layers in the software stack (e.g. OVMF and guest kernel) can send
+ /// non-VMPL0 commands directly to PSP. Therefore, the SVSM needs to maintain
+ /// the sequence number and the VMPCK only for VMPL0.
+ vmpck0_seqno: u64,
+}
+
+fn set_pages_4k_encrypted(vaddr: VirtAddr, size: usize) -> Result<(), SvsmReqError> {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_encrypted_4k(vpage)?;
+ }
+ Ok(())
+}
+
+fn set_pages_4k_shared(vaddr: VirtAddr, size: usize) -> Result<(), SvsmReqError> {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ this_cpu_mut().get_pgtable().set_shared_4k(vpage)?;
+ }
+ Ok(())
+}
+
+fn free_pages_4k(vaddr: VirtAddr, size: usize) {
+ let start = usize::from(vaddr);
+ let end = start + size;
+ for page in (start..end).step_by(PAGE_SIZE) {
+ let vpage = VirtAddr::from(page);
+ free_page(vpage);
+ }
+}
+
+impl<'a> Drop for SnpGuestRequestDriver<'a> {
+ /// Drop for [`SnpGuestRequestDriver`]
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsg`] size is not equal to [`PAGE_SIZE`]
+ /// * [`SnpGuestRequestExtData`] size is not equal to [`SNP_GUEST_REQ_MAX_DATA_SIZE`]
+ fn drop(&mut self) {
+ const GREQ_MSG_SIZE: usize = size_of::<SnpGuestRequestMsg>();
+ const GREQ_EXT_DATA_SIZE: usize = size_of::<SnpGuestRequestExtData>();
+
+ assert_eq!(GREQ_MSG_SIZE, PAGE_SIZE);
+ assert_eq!(GREQ_EXT_DATA_SIZE, SNP_GUEST_REQ_MAX_DATA_SIZE);
+
+ // NOTE: Before having a page freed (returning it to the allocator),
+ // we must make sure that it is set to encrypted; otherwise, its better
+ // to leak the page.
+
+ // Free shared page used for the request
+ if let Some(request) = self.request.as_deref_mut() {
+ let vaddr = VirtAddr::from(request as *mut SnpGuestRequestMsg);
+ if this_cpu_mut().get_pgtable().set_encrypted_4k(vaddr).is_ok() {
+ free_page(vaddr);
+ } else {
+ log::warn!("GREQ: request: failed to set page to encrypted. Memory leak!");
+ }
+ }
+ // Free shared page used for the response
+ if let Some(response) = self.response.as_deref_mut() {
+ let vaddr = VirtAddr::from(response as *mut SnpGuestRequestMsg);
+ if this_cpu_mut().get_pgtable().set_encrypted_4k(vaddr).is_ok() {
+ free_page(vaddr);
+ } else {
+ log::warn!("GREQ: response: failed to set page to encrypted. Memory leak!");
+ }
+ }
+ // Free encrypted page used for the staging
+ if let Some(staging) = self.staging.as_deref_mut() {
+ let vaddr = VirtAddr::from(staging as *mut SnpGuestRequestMsg);
+ free_page(vaddr);
+ }
+ // Free shared pages used for the ext_data
+ if let Some(ext_data) = self.ext_data.as_deref_mut() {
+ let vaddr = VirtAddr::from(ext_data as *mut SnpGuestRequestExtData);
+ if set_pages_4k_encrypted(vaddr, GREQ_EXT_DATA_SIZE).is_ok() {
+ free_pages_4k(vaddr, GREQ_EXT_DATA_SIZE);
+ } else {
+ log::warn!("GREQ: ext_data: failed to set pages to encrypted. Memory leak!");
+ }
+ }
+ }
+}
+
+impl<'a> SnpGuestRequestDriver<'a> {
+ /// Get the last VMPCK0 sequence number accounted
+ fn seqno_last_used(&self) -> u64 {
+ self.vmpck0_seqno
+ }
+
+ /// Increase the VMPCK0 sequence number by two. In order to keep the
+ /// sequence number in-sync with the PSP, this is called only when the
+ /// `SNP_GUEST_REQUEST` response is received.
+ fn seqno_add_two(&mut self) {
+ self.vmpck0_seqno += 2;
+ }
+
+ /// Call the GHCB layer to send the encrypted SNP_GUEST_REQUEST message
+ /// to the PSP.
+ fn send(&mut self, req_class: SnpGuestRequestClass) -> Result<(), SvsmReqError> {
+ let request = self
+ .request
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let response = self
+ .response
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let ext_data = self
+ .ext_data
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+
+ response.clear();
+
+ if req_class == SnpGuestRequestClass::Extended {
+ this_cpu_mut().ghcb().guest_ext_request(
+ VirtAddr::from(request as *mut SnpGuestRequestMsg),
+ VirtAddr::from(response as *mut SnpGuestRequestMsg),
+ VirtAddr::from(ext_data as *mut SnpGuestRequestExtData),
+ self.user_extdata_npages(),
+ )?;
+ } else {
+ this_cpu_mut().ghcb().guest_request(
+ VirtAddr::from(request as *mut SnpGuestRequestMsg),
+ VirtAddr::from(response as *mut SnpGuestRequestMsg),
+ )?;
+ }
+
+ self.seqno_add_two();
+
+ Ok(())
+ }
+
+ fn copy_staging_to_request(&mut self) -> Result<(), SvsmReqError> {
+ let staging = self
+ .staging
+ .as_deref()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let request = self
+ .request
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ *request = *staging;
+ Ok(())
+ }
+
+ fn copy_response_to_staging(&mut self) -> Result<(), SvsmReqError> {
+ let response = self
+ .response
+ .as_deref()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ let staging = self
+ .staging
+ .as_deref_mut()
+ .ok_or_else(SvsmReqError::invalid_request)?;
+ *staging = *response;
+ Ok(())
+ }
+
+ /// Send the provided VMPL0 `SNP_GUEST_REQUEST` command to the PSP.
+ ///
+ /// The command will be encrypted using AES-256 GCM.
+ ///
+ /// # Arguments
+ ///
+ /// * `req_class`: whether this is a regular or extended `SNP_GUEST_REQUEST` command
+ /// * `msg_type`: type of the command stored in `buffer`, e.g. [`SNP_MSG_REPORT_REQ`]
+ /// * `buffer`: buffer with the `SNP_GUEST_REQUEST` command to be sent.
+ /// The same buffer will also be used to store the response.
+ /// * `command_len`: Size (in bytes) of the command stored in `buffer`
+ ///
+ /// # Returns
+ ///
+ /// * Success:
+ /// * `usize`: Size (in bytes) of the response stored in `buffer`
+ /// * Error:
+ /// * [`SvsmReqError`]
+ fn send_request(
+ &mut self,
+ req_class: SnpGuestRequestClass,
+ msg_type: SnpGuestRequestMsgType,
+ buffer: &mut [u8],
+ command_len: usize,
+ ) -> Result<usize, SvsmReqError> {
+ if is_vmpck0_clear() || self.staging.is_none() {
+ return Err(SvsmReqError::invalid_request());
+ }
+ // Message sequence number overflow, the driver will not able
+ // to send subsequent `SNP_GUEST_REQUEST` messages to the PSP.
+ // The sequence number is restored only when the guest is rebooted.
+ let Some(msg_seqno) = self.seqno_last_used().checked_add(1) else {
+ log::error!("SNP_GUEST_REQUEST: sequence number overflow");
+ disable_vmpck0();
+ return Err(SvsmReqError::invalid_request());
+ };
+
+ // VMPL0 `SNP_GUEST_REQUEST` commands are encrypted with the VMPCK0 key
+ let vmpck0: [u8; VMPCK_SIZE] = get_vmpck0();
+
+ let inbuf = &buffer[..command_len];
+
+ // For security reasons, encrypt the message in protected memory (staging)
+ // and then copy the result to shared memory (request)
+ self.staging
+ .as_deref_mut()
+ .unwrap()
+ .encrypt_set(msg_type, msg_seqno, &vmpck0, inbuf)?;
+ self.copy_staging_to_request()?;
+
+ if let Err(e) = self.send(req_class) {
+ if let SvsmReqError::FatalError(SvsmError::Ghcb(GhcbError::VmgexitError(_rbx, info2))) =
+ e
+ {
+ // For some reason the hypervisor did not forward the request to the PSP.
+ //
+ // Because the message sequence number is used as part of the AES-GCM IV, it is important that the
+ // guest retry the request before allowing another request to be performed so that the IV cannot be
+ // reused on a new message payload.
+ match info2 & 0xffff_ffff_0000_0000u64 {
+ // The certificate buffer provided is too small.
+ SNP_GUEST_REQ_INVALID_LEN => {
+ if req_class == SnpGuestRequestClass::Extended {
+ if let Err(e1) = self.send(SnpGuestRequestClass::Regular) {
+ log::error!(
+ "SNP_GUEST_REQ_INVALID_LEN. Aborting, request resend failed"
+ );
+ disable_vmpck0();
+ return Err(e1);
+ }
+ return Err(e);
+ } else {
+ // We sent a regular SNP_GUEST_REQUEST, but the hypervisor returned
+ // an error code that is exclusive for extended SNP_GUEST_REQUEST
+ disable_vmpck0();
+ return Err(SvsmReqError::invalid_request());
+ }
+ }
+ // The hypervisor is busy.
+ SNP_GUEST_REQ_ERR_BUSY => {
+ if let Err(e2) = self.send(req_class) {
+ log::error!("SNP_GUEST_REQ_ERR_BUSY. Aborting, request resend failed");
+ disable_vmpck0();
+ return Err(e2);
+ }
+ // ... request resend worked, continue normally.
+ }
+ // Failed for unknown reason. Status codes can be found in
+ // the AMD SEV-SNP spec or in the linux kernel include/uapi/linux/psp-sev.h
+ _ => {
+ log::error!("SNP_GUEST_REQUEST failed, unknown error code={}\n", info2);
+ disable_vmpck0();
+ return Err(e);
+ }
+ }
+ }
+ }
+
+ let msg_seqno = self.seqno_last_used();
+ let resp_msg_type = SnpGuestRequestMsgType::try_from(msg_type as u8 + 1)?;
+
+ // For security reasons, decrypt the message in protected memory (staging)
+ self.copy_response_to_staging()?;
+ let result = self.staging.as_deref_mut().unwrap().decrypt_get(
+ resp_msg_type,
+ msg_seqno,
+ &vmpck0,
+ buffer,
+ );
+
+ if let Err(e) = result {
+ match e {
+ // The buffer provided is too small to store the unwrapped response.
+ // There is no need to clear the VMPCK0, just report it as invalid parameter.
+ SvsmReqError::RequestError(SvsmResultCode::INVALID_PARAMETER) => (),
+ _ => disable_vmpck0(),
+ }
+ }
+
+ result
+ }
+
+ /// Send the provided regular `SNP_GUEST_REQUEST` command to the PSP
+ pub fn send_regular_guest_request(
+ &mut self,
+ msg_type: SnpGuestRequestMsgType,
+ buffer: &mut [u8],
+ command_len: usize,
+ ) -> Result<usize, SvsmReqError> {
+ self.send_request(SnpGuestRequestClass::Regular, msg_type, buffer, command_len)
+ }
+
+ /// Buffer size in pages that will be provided to the hypervisor
+ pub fn user_extdata_npages(&self) -> u64 {
+ (self.user_extdata_size >> PAGE_SHIFT) as u64
+ }
+
+ /// Set the user_extdata_size to `n` and clear the first `n` bytes from `ext_data`
+ pub fn set_user_extdata_size(&mut self, n: usize) -> Result<(), SvsmReqError> {
+ if self.ext_data.is_none() {
+ return Err(SvsmReqError::invalid_request());
+ }
+ // At least one page
+ if (n >> PAGE_SHIFT) == 0 {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+ self.ext_data.as_deref_mut().unwrap().nclear(n)?;
+ self.user_extdata_size = n;
+
+ Ok(())
+ }
+
+ /// Send the provided extended `SNP_GUEST_REQUEST` command to the PSP
+ pub fn send_extended_guest_request(
+ &mut self,
+ msg_type: SnpGuestRequestMsgType,
+ buffer: &mut [u8],
+ command_len: usize,
+ certs: &mut [u8],
+ ) -> Result<usize, SvsmReqError> {
+ if self.ext_data.is_none() {
+ return Err(SvsmReqError::invalid_request());
+ };
+
+ let certs_len = certs.len();
+ self.set_user_extdata_size(certs_len)?;
+
+ let outbuf_len: usize = self.send_request(
+ SnpGuestRequestClass::Extended,
+ msg_type,
+ buffer,
+ command_len,
+ )?;
+
+ let ext_data = self.ext_data.as_deref_mut().unwrap();
+
+ // The SEV-SNP certificates can be used to verify the attestation report. At this point, a zeroed
+ // ext_data buffer indicates that the certificates were not imported.
+ // The VM owner can import them from the host using the virtee/snphost project
+ if ext_data.is_nclear(certs_len) {
+ log::warn!("SEV-SNP certificates not found. Make sure they were loaded from the host.");
+ } else {
+ ext_data.copy_to_slice(certs)?;
+ }
+
+ Ok(outbuf_len)
+ }
+
+ /// Create a new [`SnpGuestRequestDriver`]
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsg`] size is not equal to [`PAGE_SIZE`]
+ /// * [`SnpGuestRequestExtData`] size is not equal to [`SNP_GUEST_REQ_MAX_DATA_SIZE`]
+ pub fn new() -> Result<Self, SvsmReqError> {
+ const GREQ_MSG_SIZE: usize = size_of::<SnpGuestRequestMsg>();
+ const GREQ_EXT_DATA_SIZE: usize = size_of::<SnpGuestRequestExtData>();
+
+ assert_eq!(GREQ_MSG_SIZE, PAGE_SIZE);
+ assert_eq!(GREQ_EXT_DATA_SIZE, SNP_GUEST_REQ_MAX_DATA_SIZE);
+
+ let mut driver = SnpGuestRequestDriver::default();
+
+ // We should NOT implement the allocation steps below in their respective structs because they
+ // would make the implementation and workflow of the drop() and new() methods overly complex.
+ // By keeping them here we just need to deal with some code verbosity.
+
+ // Shared page for the request
+ let vaddr: VirtAddr = allocate_page()?;
+ driver.request = Some(unsafe {
+ &mut from_raw_parts_mut(vaddr.as_mut_ptr::<SnpGuestRequestMsg>(), GREQ_MSG_SIZE)[0]
+ });
+ this_cpu_mut().get_pgtable().set_shared_4k(vaddr)?;
+
+ // Shared page for the response
+ let vaddr: VirtAddr = allocate_page()?;
+ driver.response = Some(unsafe {
+ &mut from_raw_parts_mut(vaddr.as_mut_ptr::<SnpGuestRequestMsg>(), GREQ_MSG_SIZE)[0]
+ });
+ this_cpu_mut().get_pgtable().set_shared_4k(vaddr)?;
+
+ // Encrypted page for the staging
+ let vaddr: VirtAddr = allocate_page()?;
+ driver.staging = Some(unsafe {
+ &mut from_raw_parts_mut(vaddr.as_mut_ptr::<SnpGuestRequestMsg>(), GREQ_MSG_SIZE)[0]
+ });
+
+ // Shared pages for the ext_data
+ let vaddr: VirtAddr = allocate_pages(get_order(GREQ_EXT_DATA_SIZE))?;
+ driver.ext_data = Some(unsafe {
+ &mut from_raw_parts_mut(
+ vaddr.as_mut_ptr::<SnpGuestRequestExtData>(),
+ GREQ_EXT_DATA_SIZE,
+ )[0]
+ });
+ set_pages_4k_shared(vaddr, GREQ_EXT_DATA_SIZE)?;
+
+ Ok(driver)
+ }
+}
+
+/// Initialize the global `SnpGuestRequestDriver`
+///
+/// # Panics
+///
+/// This function panics if we fail to initialize any of the `SnpGuestRequestDriver` fields.
+pub fn guest_request_driver_init() {
+ let cell = GREQ_DRIVER.lock();
+ let _ = cell.get_or_init(|| {
+ SnpGuestRequestDriver::new()
+ .unwrap_or_else(|e| panic!("SnpGuestRequestDriver failed to initialize, e={e:?}")) | ```suggestion
.expect("SnpGuestRequestDriver failed to initialize")
``` |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,438 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload
+
+extern crate alloc;
+
+use crate::{
+ crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
+ protocols::errors::SvsmReqError,
+ sev::secrets_page::VMPCK_SIZE,
+ types::PAGE_SIZE,
+};
+use core::{
+ mem::size_of,
+ slice::{from_raw_parts, from_raw_parts_mut},
+};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+pub const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+pub const MSG_VERSION: u8 = 1;
+
+/// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestAead {
+ Invalid = 0,
+ Aes256Gcm = 1,
+}
+
+/// Message Type Encodings (AMD SEV-SNP spec. table 100)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestMsgType {
+ Invalid = 0,
+ ReportRequest = 5,
+ ReportResponse = 6,
+}
+
+impl TryFrom<u8> for SnpGuestRequestMsgType {
+ type Error = SvsmReqError;
+
+ fn try_from(v: u8) -> Result<Self, Self::Error> {
+ match v {
+ x if x == SnpGuestRequestMsgType::Invalid as u8 => Ok(SnpGuestRequestMsgType::Invalid),
+ x if x == SnpGuestRequestMsgType::ReportRequest as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportRequest)
+ }
+ x if x == SnpGuestRequestMsgType::ReportResponse as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportResponse)
+ }
+ _ => Err(SvsmReqError::invalid_parameter()),
+ }
+ }
+}
+
+/// Message header size
+const MSG_HDR_SIZE: usize = size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = PAGE_SIZE - MSG_HDR_SIZE;
+
+/// Maximum buffer size that the hypervisor takes to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsg {
+ hdr: SnpGuestRequestMsgHdr,
+ pld: [u8; MSG_PAYLOAD_SIZE],
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new [`SnpGuestRequestMsgHdr`] and initialize it
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsgHdr`] size does not fit in a u16.
+ pub fn new(msg_sz: u16, msg_type: SnpGuestRequestMsgType, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ authtag: [0; 32],
+ msg_seqno,
+ rsvd1: [0; 8],
+ algo: SnpGuestRequestAead::Aes256Gcm as u8,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type: msg_type as u8,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+
+ /// Set the authenticated tag
+ fn set_authtag(&mut self, new_tag: &[u8]) -> Result<(), SvsmReqError> {
+ let new_tag_len = new_tag.len();
+
+ if new_tag_len > self.authtag.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ self.authtag[..new_tag_len].copy_from_slice(new_tag);
+ Ok(())
+ }
+
+ /// Validate the [`SnpGuestRequestMsgHdr`] fields
+ pub fn validate(
+ &self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ ) -> Result<(), SvsmReqError> {
+ let header_size =
+ u16::try_from(MSG_HDR_SIZE).map_err(|_| SvsmReqError::invalid_format())?;
+
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SnpGuestRequestAead::Aes256Gcm as u8
+ || self.msg_type != msg_type as u8
+ || self.msg_vmpck != 0
+ || self.msg_seqno != msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used as additional authenticated data (AAD)
+ pub fn get_aad_slice(&self) -> &[u8] {
+ let self_vaddr = self as *const _ as *const u8;
+ let algo_vaddr = &self.algo as *const u8;
+
+ let algo_offset = unsafe { algo_vaddr.offset_from(self_vaddr) } as usize;
+
+ unsafe { from_raw_parts(algo_vaddr, MSG_HDR_SIZE - algo_offset) }
+ }
+
+ /// Get [`SnpGuestRequestMsgHdr`] as a mutable slice reference
+ pub fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { from_raw_parts_mut(self as *mut _ as *mut u8, MSG_HDR_SIZE) }
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+impl SnpGuestRequestMsg {
+ /// Fill the [`SnpGuestRequestMsg`] fields with zeros
+ pub fn clear(&mut self) {
+ self.hdr.as_slice_mut().fill(0);
+ self.pld.fill(0);
+ }
+
+ /// Encrypt the provided `SNP_GUEST_REQUEST` command and store the result in the actual message
+ ///
+ /// The command will be encrypted using AES-256 GCM and part of the message header will be
+ /// used as additional authenticated data (AAD).
+ ///
+ /// # Arguments
+ ///
+ /// * `msg_type`: Type of the command stored in the `command` buffer.
+ /// * `msg_seqno`: VMPL0 sequence number to be used in the message. The PSP will reject
+ /// subsequent messages when it detects that the sequence numbers are
+ /// out of sync. The sequence number is also used as initialization
+ /// vector (IV) in encryption.
+ /// * `vmpck0`: VMPCK0 key, it will be used to encrypt the command.
+ /// * `command`: command to be encrypted.
+ ///
+ /// # Returns
+ ///
+ /// () on success and [`SvsmReqError`] on error.
+ ///
+ /// # Panic
+ ///
+ /// * The computed encrypted command does not have the same size of the original command.
+ pub fn encrypt_set(
+ &mut self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ command: &[u8],
+ ) -> Result<(), SvsmReqError> {
+ let payload_size_u16 =
+ u16::try_from(command.len()).map_err(|_| SvsmReqError::invalid_parameter())?;
+
+ let mut msg_hdr = SnpGuestRequestMsgHdr::new(payload_size_u16, msg_type, msg_seqno);
+ let aad: &[u8] = msg_hdr.get_aad_slice();
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+
+ self.pld.fill(0);
+
+ // Encrypt the provided command and store the result in the message payload
+ let pld_len: usize = Aes256Gcm::encrypt(&iv, vmpck0, aad, command, &mut self.pld)?;
+
+ // In the Aes256Gcm encrypt API, the authtag is postfixed (comes after the encrypted payload)
+ let authtag_offset: usize = pld_len - AUTHTAG_SIZE;
+ let authtag = &mut self.pld[authtag_offset..pld_len];
+
+ // The original and the encrypted command should have the same size
+ assert_eq!(command.len(), authtag_offset);
+
+ // Move the authtag to the message header
+ msg_hdr.set_authtag(authtag)?;
+ authtag.fill(0);
+
+ // Set the new mssage header using the Copy trait
+ self.hdr = msg_hdr;
+
+ Ok(())
+ }
+
+ /// Decrypt the `SNP_GUEST_REQUEST` command stored in the message and store the decrypted command in
+ /// the provided `outbuf`.
+ ///
+ /// The command stored in the message payload is usually a response command received from the PSP.
+ /// It will be decrypted using AES-256 GCM and part of the message header will be used as
+ /// additional authenticated data (AAD).
+ ///
+ /// # Arguments
+ ///
+ /// * `msg_type`: Type of the command stored in the message payload
+ /// * `msg_seqno`: VMPL0 sequence number that was used in the message.
+ /// * `vmpck0`: VMPCK0 key, it will be used to decrypt the message
+ /// * `outbuf`: buffer that will be used to store the decrypted message payload
+ ///
+ /// # Returns
+ ///
+ /// * Success
+ /// * usize: Number of bytes written to `outbuf`
+ /// * Error
+ /// * [`SvsmReqError`]
+ pub fn decrypt_get(
+ &mut self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ vmpck0: &[u8; VMPCK_SIZE],
+ outbuf: &mut [u8],
+ ) -> Result<usize, SvsmReqError> {
+ self.hdr.validate(msg_type, msg_seqno)?;
+
+ let iv: [u8; IV_SIZE] = build_iv(msg_seqno);
+ let aad: &[u8] = self.hdr.get_aad_slice();
+
+ // In the Aes256Gcm decrypt API, the authtag must be provided postfix in the inbuf
+ let ciphertext_end = usize::from(self.hdr.msg_sz);
+ let tag_end: usize = ciphertext_end + AUTHTAG_SIZE;
+
+ // The message payload must be large enough to hold the ciphertext and
+ // the authentication tag.
+ //
+ // The outbuf must be large enough to hold the plaintext, which should
+ // have the same size as the ciphertext.
+ if tag_end > self.pld.len() || ciphertext_end > outbuf.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ self.pld[ciphertext_end..tag_end].copy_from_slice(&self.hdr.authtag[..AUTHTAG_SIZE]);
+
+ // Payload with postfixed authtag
+ let inbuf = &self.pld[..tag_end];
+
+ let outbuf_len: usize = Aes256Gcm::decrypt(&iv, vmpck0, aad, inbuf, outbuf)?;
+
+ Ok(outbuf_len)
+ }
+}
+
+/// Build the initialization vector for AES-256 GCM
+fn build_iv(msg_seqno: u64) -> [u8; IV_SIZE] {
+ const U64_SIZE: usize = size_of::<u64>();
+ let mut iv = [0u8; IV_SIZE];
+
+ iv[..U64_SIZE].copy_from_slice(&msg_seqno.to_ne_bytes());
+ iv
+}
+
+/// Data page(s) the hypervisor will use to store certificate data in
+/// an extended `SNP_GUEST_REQUEST`
+#[derive(Debug)]
+pub struct SnpGuestRequestExtData {
+ /// According to the GHCB spec, the data page(s) must be contiguous pages if
+ /// supplying more than one page and all certificate pages must be
+ /// assigned to the hypervisor (shared).
+ data: [u8; SNP_GUEST_REQ_MAX_DATA_SIZE],
+}
+
+impl SnpGuestRequestExtData {
+ /// Clear the first `n` bytes from data
+ pub fn nclear(&mut self, n: usize) -> Result<(), SvsmReqError> {
+ if n > self.data.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+ self.data[..n].fill(0); | ```suggestion
self.data
.get_mut(..n)
.ok_or_else(SvsmReqError::invalid_parameter)?
.fill(0);
```
Also applies to `copy_to_slice` and `is_nclear`. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,199 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` command to request an attestation report.
+
+extern crate alloc;
+
+use crate::protocols::errors::SvsmReqError;
+
+use core::{mem::size_of, slice::from_raw_parts};
+
+/// Size of the `SnpReportRequest.user_data`
+pub const USER_DATA_SIZE: usize = 64;
+
+/// MSG_REPORT_REQ payload format (AMD SEV-SNP spec. table 20)
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpReportRequest {
+ /// Guest-provided data to be included in the attestation report
+ /// REPORT_DATA (512 bits)
+ user_data: [u8; USER_DATA_SIZE],
+ /// The VMPL to put in the attestation report
+ vmpl: u32,
+ /// 31:2 - Reserved
+ /// 1:0 - KEY_SEL. Selects which key to use for derivation
+ /// 0: If VLEK is installed, sign with VLEK. Otherwise, sign with VCEK
+ /// 1: Sign with VCEK
+ /// 2: Sign with VLEK
+ /// 3: Reserved
+ flags: u32,
+ /// Reserved, must be zero
+ rsvd: [u8; 24],
+}
+
+impl SnpReportRequest {
+ /// Take a slice and return a reference for Self
+ pub fn try_from_as_ref(buffer: &[u8]) -> Result<&Self, SvsmReqError> {
+ const REPORT_REQUEST_SIZE: usize = size_of::<SnpReportRequest>();
+
+ if REPORT_REQUEST_SIZE > buffer.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ let request: &SnpReportRequest = unsafe {
+ &from_raw_parts(buffer.as_ptr() as *const _ as *const SnpReportRequest, 1)[0]
+ }; | ```suggestion
let request = unsafe { &*buffer.as_ptr().cast::<SnpReportRequest>() };
``` |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,199 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` command to request an attestation report.
+
+extern crate alloc;
+
+use crate::protocols::errors::SvsmReqError;
+
+use core::{mem::size_of, slice::from_raw_parts};
+
+/// Size of the `SnpReportRequest.user_data`
+pub const USER_DATA_SIZE: usize = 64;
+
+/// MSG_REPORT_REQ payload format (AMD SEV-SNP spec. table 20)
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpReportRequest {
+ /// Guest-provided data to be included in the attestation report
+ /// REPORT_DATA (512 bits)
+ user_data: [u8; USER_DATA_SIZE],
+ /// The VMPL to put in the attestation report
+ vmpl: u32,
+ /// 31:2 - Reserved
+ /// 1:0 - KEY_SEL. Selects which key to use for derivation
+ /// 0: If VLEK is installed, sign with VLEK. Otherwise, sign with VCEK
+ /// 1: Sign with VCEK
+ /// 2: Sign with VLEK
+ /// 3: Reserved
+ flags: u32,
+ /// Reserved, must be zero
+ rsvd: [u8; 24],
+}
+
+impl SnpReportRequest {
+ /// Take a slice and return a reference for Self
+ pub fn try_from_as_ref(buffer: &[u8]) -> Result<&Self, SvsmReqError> {
+ const REPORT_REQUEST_SIZE: usize = size_of::<SnpReportRequest>();
+
+ if REPORT_REQUEST_SIZE > buffer.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ let request: &SnpReportRequest = unsafe {
+ &from_raw_parts(buffer.as_ptr() as *const _ as *const SnpReportRequest, 1)[0]
+ };
+ if !request.is_reserved_clear() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+ Ok(request)
+ }
+
+ pub fn is_vmpl0(&self) -> bool {
+ self.vmpl == 0
+ }
+
+ /// Check if the reserved field is clear
+ fn is_reserved_clear(&self) -> bool {
+ self.rsvd.into_iter().all(|e| e == 0)
+ }
+}
+
+/// MSG_REPORT_RSP payload format (AMD SEV-SNP spec. table 23)
+#[repr(C)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpReportResponse {
+ /// The status of the key derivation operation, see [SnpReportResponseStatus]
+ status: u32,
+ /// Size in bytes of the report
+ report_size: u32,
+ /// Reserved
+ _reserved: [u8; 24],
+ /// The attestation report generated by firmware
+ report: AttestationReport,
+}
+
+/// Supported values for SnpReportResponse.status
+#[repr(u32)]
+#[derive(Clone, Copy, Debug)]
+pub enum SnpReportResponseStatus {
+ Success = 0,
+ InvalidParameters = 0x16,
+ InvalidKeySelection = 0x27,
+}
+
+impl SnpReportResponse {
+ pub fn try_from_as_ref(buffer: &[u8]) -> Result<&Self, SvsmReqError> {
+ const REPORT_RESPONSE_SIZE: usize = size_of::<SnpReportResponse>();
+
+ if REPORT_RESPONSE_SIZE > buffer.len() {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ let response: &SnpReportResponse = unsafe {
+ &from_raw_parts(buffer.as_ptr() as *const _ as *const SnpReportResponse, 1)[0]
+ }; | ```suggestion
let response: &SnpReportResponse = unsafe { &*buffer.as_ptr().cast::<SnpReportResponse>() };
``` |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,199 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! `SNP_GUEST_REQUEST` command to request an attestation report.
+
+extern crate alloc;
+
+use crate::protocols::errors::SvsmReqError;
+
+use core::{mem::size_of, slice::from_raw_parts};
+
+/// Size of the `SnpReportRequest.user_data`
+pub const USER_DATA_SIZE: usize = 64;
+
+/// MSG_REPORT_REQ payload format (AMD SEV-SNP spec. table 20)
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpReportRequest {
+ /// Guest-provided data to be included in the attestation report
+ /// REPORT_DATA (512 bits)
+ user_data: [u8; USER_DATA_SIZE],
+ /// The VMPL to put in the attestation report
+ vmpl: u32,
+ /// 31:2 - Reserved
+ /// 1:0 - KEY_SEL. Selects which key to use for derivation
+ /// 0: If VLEK is installed, sign with VLEK. Otherwise, sign with VCEK
+ /// 1: Sign with VCEK
+ /// 2: Sign with VLEK
+ /// 3: Reserved
+ flags: u32,
+ /// Reserved, must be zero
+ rsvd: [u8; 24],
+}
+
+impl SnpReportRequest {
+ /// Take a slice and return a reference for Self
+ pub fn try_from_as_ref(buffer: &[u8]) -> Result<&Self, SvsmReqError> {
+ const REPORT_REQUEST_SIZE: usize = size_of::<SnpReportRequest>();
+
+ if REPORT_REQUEST_SIZE > buffer.len() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+
+ let request: &SnpReportRequest = unsafe {
+ &from_raw_parts(buffer.as_ptr() as *const _ as *const SnpReportRequest, 1)[0]
+ };
+ if !request.is_reserved_clear() {
+ return Err(SvsmReqError::invalid_parameter());
+ }
+ Ok(request)
+ }
+
+ pub fn is_vmpl0(&self) -> bool {
+ self.vmpl == 0
+ }
+
+ /// Check if the reserved field is clear
+ fn is_reserved_clear(&self) -> bool {
+ self.rsvd.into_iter().all(|e| e == 0)
+ }
+}
+
+/// MSG_REPORT_RSP payload format (AMD SEV-SNP spec. table 23)
+#[repr(C)] | This needs to be `repr(C, packed)` if we want to create references from unaligned byte slices. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,572 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload
+
+extern crate alloc;
+
+use alloc::{
+ alloc::{alloc, Layout},
+ boxed::Box,
+};
+use core::{
+ mem::size_of,
+ ptr::copy_nonoverlapping,
+ slice::{from_raw_parts, from_raw_parts_mut},
+};
+
+use crate::{
+ address::{Address, VirtAddr},
+ cpu::percpu::this_cpu_mut,
+ crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
+ protocols::errors::SvsmReqError,
+ sev::secrets_page::VMPCK_SIZE,
+ types::PAGE_SIZE,
+};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+const MSG_VERSION: u8 = 1;
+
+/// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestAead {
+ Invalid = 0,
+ Aes256Gcm = 1,
+}
+
+/// Message Type Encodings (AMD SEV-SNP spec. table 100)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestMsgType {
+ Invalid = 0,
+ ReportRequest = 5,
+ ReportResponse = 6,
+}
+
+impl TryFrom<u8> for SnpGuestRequestMsgType {
+ type Error = SvsmReqError;
+
+ fn try_from(v: u8) -> Result<Self, Self::Error> {
+ match v {
+ x if x == SnpGuestRequestMsgType::Invalid as u8 => Ok(SnpGuestRequestMsgType::Invalid),
+ x if x == SnpGuestRequestMsgType::ReportRequest as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportRequest)
+ }
+ x if x == SnpGuestRequestMsgType::ReportResponse as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportResponse)
+ }
+ _ => Err(SvsmReqError::invalid_parameter()),
+ }
+ }
+}
+
+/// Message header size
+const MSG_HDR_SIZE: usize = size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = PAGE_SIZE - MSG_HDR_SIZE;
+
+/// Maximum buffer size that the hypervisor takes to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[repr(C, packed)]
+#[derive(Debug)]
+pub struct SnpGuestRequestMsg {
+ hdr: SnpGuestRequestMsgHdr,
+ pld: [u8; MSG_PAYLOAD_SIZE],
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new [`SnpGuestRequestMsgHdr`] and initialize it
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsgHdr`] size does not fit in a u16.
+ pub fn new(msg_sz: u16, msg_type: SnpGuestRequestMsgType, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ msg_seqno,
+ algo: SnpGuestRequestAead::Aes256Gcm as u8,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type: msg_type as u8,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ msg_vmpck: 0,
+ ..Default::default()
+ }
+ }
+
+ /// Set the authenticated tag
+ fn set_authtag(&mut self, new_tag: &[u8]) -> Result<(), SvsmReqError> {
+ self.authtag
+ .get_mut(..new_tag.len())
+ .ok_or_else(SvsmReqError::invalid_parameter)?
+ .copy_from_slice(new_tag);
+ Ok(())
+ }
+
+ /// Validate the [`SnpGuestRequestMsgHdr`] fields
+ fn validate(
+ &self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ ) -> Result<(), SvsmReqError> {
+ let header_size =
+ u16::try_from(MSG_HDR_SIZE).map_err(|_| SvsmReqError::invalid_format())?;
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SnpGuestRequestAead::Aes256Gcm as u8
+ || self.msg_type != msg_type as u8
+ || self.msg_vmpck != 0
+ || self.msg_seqno != msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used as additional authenticated data (AAD)
+ fn get_aad_slice(&self) -> &[u8] {
+ let self_gva = self as *const _ as *const u8;
+ let algo_gva = &self.algo as *const u8;
+
+ let algo_offset = unsafe { algo_gva.offset_from(self_gva) } as usize;
+
+ unsafe { from_raw_parts(algo_gva, MSG_HDR_SIZE - algo_offset) }
+ }
+
+ /// Get [`SnpGuestRequestMsgHdr`] as a mutable slice reference
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { from_raw_parts_mut(self as *mut _ as *mut u8, MSG_HDR_SIZE) }
+ }
+
+ fn copy_from(&mut self, src: &SnpGuestRequestMsgHdr) {
+ unsafe { copy_nonoverlapping(src, self, 1) };
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+#[cfg(not(test))]
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ if self.set_encrypted().is_err() {
+ log::error!("SnpGuestRequestMsg: failed to set page to encrypted. Memory leak!");
+ let boxed = unsafe { Box::from_raw(self) };
+ core::mem::forget(boxed);
+ }
+ }
+}
+
+impl SnpGuestRequestMsg {
+ // Allocate the object in the heap without going through stack as
+ // this is a large object
+ //
+ // # Panic
+ //
+ // * Memory allocated is not page aligned or Self does not
+ // fit into a page
+ pub fn boxed_new() -> Result<Box<Self>, SvsmReqError> {
+ let layout = Layout::new::<Self>();
+
+ // The GHCB spec says it has to fit in one page and be page aligned
+ assert!(layout.size() <= PAGE_SIZE);
+
+ unsafe {
+ let addr = alloc(layout);
+ if addr.is_null() {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ assert!(VirtAddr::from(addr).is_page_aligned());
+
+ // TODO: have a zeroed page. However, the PSP always returns
+ // the error code 22 (INVALID_PARAM) if the page is written with
+ // zeros here or earlier (e.g. alloc_zeroed, allocate_zeroed_page) | I don't think this is related to the actual content of the page. I also get error 22 if I replace `alloc` with `allocate_page`. |
svsm | github_2023 | others | 69 | coconut-svsm | roy-hopkins | @@ -0,0 +1,572 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload
+
+extern crate alloc;
+
+use alloc::{
+ alloc::{alloc, Layout},
+ boxed::Box,
+};
+use core::{
+ mem::size_of,
+ ptr::copy_nonoverlapping,
+ slice::{from_raw_parts, from_raw_parts_mut},
+};
+
+use crate::{
+ address::{Address, VirtAddr},
+ cpu::percpu::this_cpu_mut,
+ crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
+ protocols::errors::SvsmReqError,
+ sev::secrets_page::VMPCK_SIZE,
+ types::PAGE_SIZE,
+};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+const MSG_VERSION: u8 = 1;
+
+/// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestAead {
+ Invalid = 0,
+ Aes256Gcm = 1,
+}
+
+/// Message Type Encodings (AMD SEV-SNP spec. table 100)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestMsgType {
+ Invalid = 0,
+ ReportRequest = 5,
+ ReportResponse = 6,
+}
+
+impl TryFrom<u8> for SnpGuestRequestMsgType {
+ type Error = SvsmReqError;
+
+ fn try_from(v: u8) -> Result<Self, Self::Error> {
+ match v {
+ x if x == SnpGuestRequestMsgType::Invalid as u8 => Ok(SnpGuestRequestMsgType::Invalid),
+ x if x == SnpGuestRequestMsgType::ReportRequest as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportRequest)
+ }
+ x if x == SnpGuestRequestMsgType::ReportResponse as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportResponse)
+ }
+ _ => Err(SvsmReqError::invalid_parameter()),
+ }
+ }
+}
+
+/// Message header size
+const MSG_HDR_SIZE: usize = size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = PAGE_SIZE - MSG_HDR_SIZE;
+
+/// Maximum buffer size that the hypervisor takes to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[repr(C, packed)]
+#[derive(Debug)]
+pub struct SnpGuestRequestMsg {
+ hdr: SnpGuestRequestMsgHdr,
+ pld: [u8; MSG_PAYLOAD_SIZE],
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new [`SnpGuestRequestMsgHdr`] and initialize it
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsgHdr`] size does not fit in a u16.
+ pub fn new(msg_sz: u16, msg_type: SnpGuestRequestMsgType, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ msg_seqno,
+ algo: SnpGuestRequestAead::Aes256Gcm as u8,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type: msg_type as u8,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ msg_vmpck: 0,
+ ..Default::default()
+ }
+ }
+
+ /// Set the authenticated tag
+ fn set_authtag(&mut self, new_tag: &[u8]) -> Result<(), SvsmReqError> {
+ self.authtag
+ .get_mut(..new_tag.len())
+ .ok_or_else(SvsmReqError::invalid_parameter)?
+ .copy_from_slice(new_tag);
+ Ok(())
+ }
+
+ /// Validate the [`SnpGuestRequestMsgHdr`] fields
+ fn validate(
+ &self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ ) -> Result<(), SvsmReqError> {
+ let header_size =
+ u16::try_from(MSG_HDR_SIZE).map_err(|_| SvsmReqError::invalid_format())?;
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SnpGuestRequestAead::Aes256Gcm as u8
+ || self.msg_type != msg_type as u8
+ || self.msg_vmpck != 0
+ || self.msg_seqno != msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used as additional authenticated data (AAD)
+ fn get_aad_slice(&self) -> &[u8] {
+ let self_gva = self as *const _ as *const u8;
+ let algo_gva = &self.algo as *const u8;
+
+ let algo_offset = unsafe { algo_gva.offset_from(self_gva) } as usize;
+
+ unsafe { from_raw_parts(algo_gva, MSG_HDR_SIZE - algo_offset) }
+ }
+
+ /// Get [`SnpGuestRequestMsgHdr`] as a mutable slice reference
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { from_raw_parts_mut(self as *mut _ as *mut u8, MSG_HDR_SIZE) }
+ }
+
+ fn copy_from(&mut self, src: &SnpGuestRequestMsgHdr) {
+ unsafe { copy_nonoverlapping(src, self, 1) };
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+#[cfg(not(test))]
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ if self.set_encrypted().is_err() {
+ log::error!("SnpGuestRequestMsg: failed to set page to encrypted. Memory leak!");
+ let boxed = unsafe { Box::from_raw(self) };
+ core::mem::forget(boxed);
+ }
+ }
+}
+
+impl SnpGuestRequestMsg {
+ // Allocate the object in the heap without going through stack as
+ // this is a large object
+ //
+ // # Panic
+ //
+ // * Memory allocated is not page aligned or Self does not
+ // fit into a page
+ pub fn boxed_new() -> Result<Box<Self>, SvsmReqError> {
+ let layout = Layout::new::<Self>();
+
+ // The GHCB spec says it has to fit in one page and be page aligned
+ assert!(layout.size() <= PAGE_SIZE);
+
+ unsafe {
+ let addr = alloc(layout);
+ if addr.is_null() {
+ return Err(SvsmReqError::invalid_request());
+ }
+
+ assert!(VirtAddr::from(addr).is_page_aligned());
+
+ // TODO: have a zeroed page. However, the PSP always returns
+ // the error code 22 (INVALID_PARAM) if the page is written with
+ // zeros here or earlier (e.g. alloc_zeroed, allocate_zeroed_page)
+
+ let ptr = addr.cast::<Self>();
+ Ok(Box::from_raw(ptr))
+ }
+ }
+
+ // Clear the C-bit (memory encryption bit) for the Self page
+ pub fn set_shared(&mut self) -> Result<(), SvsmReqError> {
+ let vaddr = VirtAddr::from(self as *mut Self);
+ this_cpu_mut()
+ .get_pgtable()
+ .set_shared_4k(vaddr)
+ .map_err(|_| SvsmReqError::invalid_request()) | I cannot see where the hypervisor is informed of the encrypted state change for this memory. On my test system this resulted in a lockup when the memory contents were written.
I fixed it with the following temporary code:
```suggestion
let vaddr = VirtAddr::from(self as *mut Self);
this_cpu_mut()
.get_pgtable()
.set_shared_4k(vaddr)
.map_err(|_| SvsmReqError::invalid_request())?;
let paddr = virt_to_phys(vaddr);
this_cpu_mut()
.ghcb()
.page_state_change(
paddr,
paddr + PAGE_SIZE,
PageSize::Regular,
PageStateChangeOp::PscShared,
)
.map_err(|_| SvsmReqError::invalid_request())
```
This also needs to be done in `set_encrypted()` and the corresponding `range` functions in the same module. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Driver to send `SNP_GUEST_REQUEST` commands to the PSP. It can be any of the
+//! request or response command types defined in the SEV-SNP spec, regardless if it's
+//! a regular or an extended command.
+
+extern crate alloc;
+
+use alloc::boxed::Box;
+use core::{cell::OnceCell, mem::size_of};
+
+use crate::{
+ address::VirtAddr,
+ cpu::percpu::this_cpu_mut,
+ error::SvsmError,
+ greq::msg::{SnpGuestRequestExtData, SnpGuestRequestMsg, SnpGuestRequestMsgType},
+ locking::SpinLock,
+ protocols::errors::{SvsmReqError, SvsmResultCode},
+ sev::{
+ ghcb::GhcbError,
+ secrets_page::{disable_vmpck0, get_vmpck0, is_vmpck0_clear, VMPCK_SIZE},
+ },
+ types::PAGE_SHIFT,
+ BIT,
+};
+
+/// Global `SNP_GUEST_REQUEST` driver instance
+static GREQ_DRIVER: SpinLock<OnceCell<SnpGuestRequestDriver>> = SpinLock::new(OnceCell::new());
+
+// Hypervisor error codes
+
+/// Buffer provided is too small
+const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32);
+/// Hypervisor busy, try again
+const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33);
+
+/// Class of the `SNP_GUEST_REQUEST` command: Regular or Extended
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+enum SnpGuestRequestClass {
+ Regular = 0,
+ Extended = 1,
+}
+
+/// `SNP_GUEST_REQUEST` driver
+#[derive(Debug)]
+struct SnpGuestRequestDriver {
+ /// Shared page used for the `SNP_GUEST_REQUEST` request
+ request: Box<SnpGuestRequestMsg>, | This is technically unsound. Rust assumes that the memory pointed to in those `Box`es behave according to it's ownership rules, but given that they are shared, the host can mess with the memory in a way that Rust doesn't allow. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,572 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload
+
+extern crate alloc;
+
+use alloc::{
+ alloc::{alloc, Layout},
+ boxed::Box,
+};
+use core::{
+ mem::size_of,
+ ptr::copy_nonoverlapping,
+ slice::{from_raw_parts, from_raw_parts_mut},
+};
+
+use crate::{
+ address::{Address, VirtAddr},
+ cpu::percpu::this_cpu_mut,
+ crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
+ protocols::errors::SvsmReqError,
+ sev::secrets_page::VMPCK_SIZE,
+ types::PAGE_SIZE,
+};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+const MSG_VERSION: u8 = 1;
+
+/// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestAead {
+ Invalid = 0,
+ Aes256Gcm = 1,
+}
+
+/// Message Type Encodings (AMD SEV-SNP spec. table 100)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestMsgType {
+ Invalid = 0,
+ ReportRequest = 5,
+ ReportResponse = 6,
+}
+
+impl TryFrom<u8> for SnpGuestRequestMsgType {
+ type Error = SvsmReqError;
+
+ fn try_from(v: u8) -> Result<Self, Self::Error> {
+ match v {
+ x if x == SnpGuestRequestMsgType::Invalid as u8 => Ok(SnpGuestRequestMsgType::Invalid),
+ x if x == SnpGuestRequestMsgType::ReportRequest as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportRequest)
+ }
+ x if x == SnpGuestRequestMsgType::ReportResponse as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportResponse)
+ }
+ _ => Err(SvsmReqError::invalid_parameter()),
+ }
+ }
+}
+
+/// Message header size
+const MSG_HDR_SIZE: usize = size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = PAGE_SIZE - MSG_HDR_SIZE;
+
+/// Maximum buffer size that the hypervisor takes to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[repr(C, packed)]
+#[derive(Debug)]
+pub struct SnpGuestRequestMsg {
+ hdr: SnpGuestRequestMsgHdr,
+ pld: [u8; MSG_PAYLOAD_SIZE],
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new [`SnpGuestRequestMsgHdr`] and initialize it
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsgHdr`] size does not fit in a u16.
+ pub fn new(msg_sz: u16, msg_type: SnpGuestRequestMsgType, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ msg_seqno,
+ algo: SnpGuestRequestAead::Aes256Gcm as u8,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type: msg_type as u8,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ msg_vmpck: 0,
+ ..Default::default()
+ }
+ }
+
+ /// Set the authenticated tag
+ fn set_authtag(&mut self, new_tag: &[u8]) -> Result<(), SvsmReqError> {
+ self.authtag
+ .get_mut(..new_tag.len())
+ .ok_or_else(SvsmReqError::invalid_parameter)?
+ .copy_from_slice(new_tag);
+ Ok(())
+ }
+
+ /// Validate the [`SnpGuestRequestMsgHdr`] fields
+ fn validate(
+ &self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ ) -> Result<(), SvsmReqError> {
+ let header_size =
+ u16::try_from(MSG_HDR_SIZE).map_err(|_| SvsmReqError::invalid_format())?;
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SnpGuestRequestAead::Aes256Gcm as u8
+ || self.msg_type != msg_type as u8
+ || self.msg_vmpck != 0
+ || self.msg_seqno != msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used as additional authenticated data (AAD)
+ fn get_aad_slice(&self) -> &[u8] {
+ let self_gva = self as *const _ as *const u8;
+ let algo_gva = &self.algo as *const u8;
+
+ let algo_offset = unsafe { algo_gva.offset_from(self_gva) } as usize;
+
+ unsafe { from_raw_parts(algo_gva, MSG_HDR_SIZE - algo_offset) }
+ }
+
+ /// Get [`SnpGuestRequestMsgHdr`] as a mutable slice reference
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { from_raw_parts_mut(self as *mut _ as *mut u8, MSG_HDR_SIZE) }
+ }
+
+ fn copy_from(&mut self, src: &SnpGuestRequestMsgHdr) {
+ unsafe { copy_nonoverlapping(src, self, 1) };
+ } | If we derive `Clone` & `Copy` for `SnpGuestRequestMsg`, we can get rid of the unsafe by simply doing `*self = *src`. |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,572 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload
+
+extern crate alloc;
+
+use alloc::{
+ alloc::{alloc, Layout},
+ boxed::Box,
+};
+use core::{
+ mem::size_of,
+ ptr::copy_nonoverlapping,
+ slice::{from_raw_parts, from_raw_parts_mut},
+};
+
+use crate::{
+ address::{Address, VirtAddr},
+ cpu::percpu::this_cpu_mut,
+ crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
+ protocols::errors::SvsmReqError,
+ sev::secrets_page::VMPCK_SIZE,
+ types::PAGE_SIZE,
+};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+const MSG_VERSION: u8 = 1;
+
+/// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestAead {
+ Invalid = 0,
+ Aes256Gcm = 1,
+}
+
+/// Message Type Encodings (AMD SEV-SNP spec. table 100)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestMsgType {
+ Invalid = 0,
+ ReportRequest = 5,
+ ReportResponse = 6,
+}
+
+impl TryFrom<u8> for SnpGuestRequestMsgType {
+ type Error = SvsmReqError;
+
+ fn try_from(v: u8) -> Result<Self, Self::Error> {
+ match v {
+ x if x == SnpGuestRequestMsgType::Invalid as u8 => Ok(SnpGuestRequestMsgType::Invalid),
+ x if x == SnpGuestRequestMsgType::ReportRequest as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportRequest)
+ }
+ x if x == SnpGuestRequestMsgType::ReportResponse as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportResponse)
+ }
+ _ => Err(SvsmReqError::invalid_parameter()),
+ }
+ }
+}
+
+/// Message header size
+const MSG_HDR_SIZE: usize = size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = PAGE_SIZE - MSG_HDR_SIZE;
+
+/// Maximum buffer size that the hypervisor takes to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[repr(C, packed)]
+#[derive(Debug)]
+pub struct SnpGuestRequestMsg {
+ hdr: SnpGuestRequestMsgHdr,
+ pld: [u8; MSG_PAYLOAD_SIZE],
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new [`SnpGuestRequestMsgHdr`] and initialize it
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsgHdr`] size does not fit in a u16.
+ pub fn new(msg_sz: u16, msg_type: SnpGuestRequestMsgType, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ msg_seqno,
+ algo: SnpGuestRequestAead::Aes256Gcm as u8,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type: msg_type as u8,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ msg_vmpck: 0,
+ ..Default::default()
+ }
+ }
+
+ /// Set the authenticated tag
+ fn set_authtag(&mut self, new_tag: &[u8]) -> Result<(), SvsmReqError> {
+ self.authtag
+ .get_mut(..new_tag.len())
+ .ok_or_else(SvsmReqError::invalid_parameter)?
+ .copy_from_slice(new_tag);
+ Ok(())
+ }
+
+ /// Validate the [`SnpGuestRequestMsgHdr`] fields
+ fn validate(
+ &self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ ) -> Result<(), SvsmReqError> {
+ let header_size =
+ u16::try_from(MSG_HDR_SIZE).map_err(|_| SvsmReqError::invalid_format())?;
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SnpGuestRequestAead::Aes256Gcm as u8
+ || self.msg_type != msg_type as u8
+ || self.msg_vmpck != 0
+ || self.msg_seqno != msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used as additional authenticated data (AAD)
+ fn get_aad_slice(&self) -> &[u8] {
+ let self_gva = self as *const _ as *const u8;
+ let algo_gva = &self.algo as *const u8;
+
+ let algo_offset = unsafe { algo_gva.offset_from(self_gva) } as usize;
+
+ unsafe { from_raw_parts(algo_gva, MSG_HDR_SIZE - algo_offset) }
+ }
+
+ /// Get [`SnpGuestRequestMsgHdr`] as a mutable slice reference
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { from_raw_parts_mut(self as *mut _ as *mut u8, MSG_HDR_SIZE) }
+ }
+
+ fn copy_from(&mut self, src: &SnpGuestRequestMsgHdr) {
+ unsafe { copy_nonoverlapping(src, self, 1) };
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+#[cfg(not(test))]
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ if self.set_encrypted().is_err() {
+ log::error!("SnpGuestRequestMsg: failed to set page to encrypted. Memory leak!");
+ let boxed = unsafe { Box::from_raw(self) };
+ core::mem::forget(boxed); | This doesn't do anything, does it? |
svsm | github_2023 | others | 69 | coconut-svsm | Freax13 | @@ -0,0 +1,572 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 IBM
+//
+// Authors: Claudio Carvalho <cclaudio@linux.ibm.com>
+
+//! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload
+
+extern crate alloc;
+
+use alloc::{
+ alloc::{alloc, Layout},
+ boxed::Box,
+};
+use core::{
+ mem::size_of,
+ ptr::copy_nonoverlapping,
+ slice::{from_raw_parts, from_raw_parts_mut},
+};
+
+use crate::{
+ address::{Address, VirtAddr},
+ cpu::percpu::this_cpu_mut,
+ crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
+ protocols::errors::SvsmReqError,
+ sev::secrets_page::VMPCK_SIZE,
+ types::PAGE_SIZE,
+};
+
+// Message Header Format (AMD SEV-SNP spec. table 98)
+
+/// Version of the message header
+const HDR_VERSION: u8 = 1;
+/// Version of the message payload
+const MSG_VERSION: u8 = 1;
+
+/// AEAD Algorithm Encodings (AMD SEV-SNP spec. table 99)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestAead {
+ Invalid = 0,
+ Aes256Gcm = 1,
+}
+
+/// Message Type Encodings (AMD SEV-SNP spec. table 100)
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(u8)]
+pub enum SnpGuestRequestMsgType {
+ Invalid = 0,
+ ReportRequest = 5,
+ ReportResponse = 6,
+}
+
+impl TryFrom<u8> for SnpGuestRequestMsgType {
+ type Error = SvsmReqError;
+
+ fn try_from(v: u8) -> Result<Self, Self::Error> {
+ match v {
+ x if x == SnpGuestRequestMsgType::Invalid as u8 => Ok(SnpGuestRequestMsgType::Invalid),
+ x if x == SnpGuestRequestMsgType::ReportRequest as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportRequest)
+ }
+ x if x == SnpGuestRequestMsgType::ReportResponse as u8 => {
+ Ok(SnpGuestRequestMsgType::ReportResponse)
+ }
+ _ => Err(SvsmReqError::invalid_parameter()),
+ }
+ }
+}
+
+/// Message header size
+const MSG_HDR_SIZE: usize = size_of::<SnpGuestRequestMsgHdr>();
+/// Message payload size
+const MSG_PAYLOAD_SIZE: usize = PAGE_SIZE - MSG_HDR_SIZE;
+
+/// Maximum buffer size that the hypervisor takes to store the
+/// SEV-SNP certificates
+pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 4 * PAGE_SIZE;
+
+/// `SNP_GUEST_REQUEST` message format
+#[repr(C, packed)]
+#[derive(Debug)]
+pub struct SnpGuestRequestMsg {
+ hdr: SnpGuestRequestMsgHdr,
+ pld: [u8; MSG_PAYLOAD_SIZE],
+}
+
+/// `SNP_GUEST_REQUEST` message header format
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug)]
+pub struct SnpGuestRequestMsgHdr {
+ /// Message authentication tag
+ authtag: [u8; 32],
+ /// The sequence number for this message
+ msg_seqno: u64,
+ /// Reserve. Must be zero.
+ rsvd1: [u8; 8],
+ /// The AEAD used to encrypt this message
+ algo: u8,
+ /// The version of the message header
+ hdr_version: u8,
+ /// The size of the message header in bytes
+ hdr_sz: u16,
+ /// The type of the payload
+ msg_type: u8,
+ /// The version of the payload
+ msg_version: u8,
+ /// The size of the payload in bytes
+ msg_sz: u16,
+ /// Reserved. Must be zero.
+ rsvd2: u32,
+ /// The ID of the VMPCK used to protect this message
+ msg_vmpck: u8,
+ /// Reserved. Must be zero.
+ rsvd3: [u8; 35],
+}
+
+impl SnpGuestRequestMsgHdr {
+ /// Allocate a new [`SnpGuestRequestMsgHdr`] and initialize it
+ ///
+ /// # Panic
+ ///
+ /// * [`SnpGuestRequestMsgHdr`] size does not fit in a u16.
+ pub fn new(msg_sz: u16, msg_type: SnpGuestRequestMsgType, msg_seqno: u64) -> Self {
+ assert!(u16::try_from(MSG_HDR_SIZE).is_ok());
+
+ Self {
+ msg_seqno,
+ algo: SnpGuestRequestAead::Aes256Gcm as u8,
+ hdr_version: HDR_VERSION,
+ hdr_sz: MSG_HDR_SIZE as u16,
+ msg_type: msg_type as u8,
+ msg_version: MSG_VERSION,
+ msg_sz,
+ msg_vmpck: 0,
+ ..Default::default()
+ }
+ }
+
+ /// Set the authenticated tag
+ fn set_authtag(&mut self, new_tag: &[u8]) -> Result<(), SvsmReqError> {
+ self.authtag
+ .get_mut(..new_tag.len())
+ .ok_or_else(SvsmReqError::invalid_parameter)?
+ .copy_from_slice(new_tag);
+ Ok(())
+ }
+
+ /// Validate the [`SnpGuestRequestMsgHdr`] fields
+ fn validate(
+ &self,
+ msg_type: SnpGuestRequestMsgType,
+ msg_seqno: u64,
+ ) -> Result<(), SvsmReqError> {
+ let header_size =
+ u16::try_from(MSG_HDR_SIZE).map_err(|_| SvsmReqError::invalid_format())?;
+ if self.hdr_version != HDR_VERSION
+ || self.hdr_sz != header_size
+ || self.algo != SnpGuestRequestAead::Aes256Gcm as u8
+ || self.msg_type != msg_type as u8
+ || self.msg_vmpck != 0
+ || self.msg_seqno != msg_seqno
+ {
+ return Err(SvsmReqError::invalid_format());
+ }
+ Ok(())
+ }
+
+ /// Get a slice of the header fields used as additional authenticated data (AAD)
+ fn get_aad_slice(&self) -> &[u8] {
+ let self_gva = self as *const _ as *const u8;
+ let algo_gva = &self.algo as *const u8;
+
+ let algo_offset = unsafe { algo_gva.offset_from(self_gva) } as usize;
+
+ unsafe { from_raw_parts(algo_gva, MSG_HDR_SIZE - algo_offset) }
+ }
+
+ /// Get [`SnpGuestRequestMsgHdr`] as a mutable slice reference
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { from_raw_parts_mut(self as *mut _ as *mut u8, MSG_HDR_SIZE) }
+ }
+
+ fn copy_from(&mut self, src: &SnpGuestRequestMsgHdr) {
+ unsafe { copy_nonoverlapping(src, self, 1) };
+ }
+}
+
+impl Default for SnpGuestRequestMsgHdr {
+ // default() method implementation. We can't derive Default because
+ // the field "rsvd3: [u8; 35]" conflicts with the Default trait, which
+ // supports up to [T; 32].
+ fn default() -> Self {
+ Self {
+ authtag: [0; 32],
+ msg_seqno: 0,
+ rsvd1: [0; 8],
+ algo: 0,
+ hdr_version: 0,
+ hdr_sz: 0,
+ msg_type: 0,
+ msg_version: 0,
+ msg_sz: 0,
+ rsvd2: 0,
+ msg_vmpck: 0,
+ rsvd3: [0; 35],
+ }
+ }
+}
+
+#[cfg(not(test))]
+impl Drop for SnpGuestRequestMsg {
+ fn drop(&mut self) {
+ if self.set_encrypted().is_err() {
+ log::error!("SnpGuestRequestMsg: failed to set page to encrypted. Memory leak!");
+ let boxed = unsafe { Box::from_raw(self) };
+ core::mem::forget(boxed);
+ }
+ }
+}
+
+impl SnpGuestRequestMsg {
+ // Allocate the object in the heap without going through stack as
+ // this is a large object | Do we really need to avoid copying from the stack here? I replaced this function's body with
```rust
Ok(Box::new(Self {
hdr: Default::default(),
pld: [0; MSG_PAYLOAD_SIZE],
}))
```
and didn't hit any errors. |
svsm | github_2023 | others | 112 | coconut-svsm | 00xc | @@ -1605,3 +1605,197 @@ impl<'a, RP: Elf64RelocProcessor> Iterator for Elf64AppliedRelaIterator<'a, RP>
Some(Ok(Some(reloc_op)))
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_elf64_shdr_verify_methods() {
+ // Create a valid Elf64Shdr instance for testing.
+ let valid_shdr = Elf64Shdr {
+ sh_name: 1,
+ sh_type: 2,
+ sh_flags: Elf64ShdrFlags::WRITE | Elf64ShdrFlags::ALLOC,
+ sh_addr: 0x1000,
+ sh_offset: 0x2000,
+ sh_size: 0x3000,
+ sh_link: 3,
+ sh_info: 4,
+ sh_addralign: 8,
+ sh_entsize: 0,
+ };
+
+ // Verify that the valid Elf64Shdr instance passes verification.
+ assert!(valid_shdr.verify().is_ok());
+
+ // Create an invalid Elf64Shdr instance for testing.
+ let invalid_shdr = Elf64Shdr {
+ sh_name: 0,
+ sh_type: 2,
+ sh_flags: Elf64ShdrFlags::from_bits(0).unwrap(),
+ sh_addr: 0x1000,
+ sh_offset: 0x2000,
+ sh_size: 0x3000,
+ sh_link: 3,
+ sh_info: 4,
+ sh_addralign: 7, // Invalid alignment
+ sh_entsize: 0,
+ };
+
+ // Verify that the invalid Elf64Shdr instance fails verification.
+ assert!(invalid_shdr.verify().is_err());
+ }
+
+ #[test]
+ fn test_elf64_dynamic_reloc_table_verify_valid() {
+ // Create a valid Elf64DynamicRelocTable instance for testing.
+ let reloc_table = Elf64DynamicRelocTable {
+ base_vaddr: 0x1000,
+ size: 0x2000,
+ entsize: 0x30,
+ };
+
+ // Verify that the valid Elf64DynamicRelocTable instance passes verification.
+ assert!(reloc_table.verify().is_ok());
+ }
+
+ #[test]
+ fn test_elf64_addr_range_methods() {
+ // Test Elf64AddrRange::len() and Elf64AddrRange::is_empty().
+
+ // Create an Elf64AddrRange instance for testing.
+ let addr_range = Elf64AddrRange {
+ vaddr_begin: 0x1000,
+ vaddr_end: 0x2000,
+ };
+
+ // Check that the length calculation is correct.
+ assert_eq!(addr_range.len(), 0x1000);
+
+ // Check if the address range is empty.
+ assert!(!addr_range.is_empty());
+
+ // Test Elf64AddrRange::try_from().
+
+ // Create a valid input tuple for try_from.
+ let valid_input: (Elf64Addr, Elf64Xword) = (0x1000, 0x2000);
+
+ // Attempt to create an Elf64AddrRange from the valid input.
+ let result = Elf64AddrRange::try_from(valid_input);
+
+ // Verify that the result is Ok and contains the expected Elf64AddrRange.
+ assert!(result.is_ok());
+ let valid_addr_range = result.unwrap();
+ assert_eq!(valid_addr_range.vaddr_begin, 0x1000);
+ assert_eq!(valid_addr_range.vaddr_end, 0x3000);
+ }
+
+ #[test]
+ fn test_elf64_file_range_try_from() {
+ // Valid range
+ let valid_range: (Elf64Off, Elf64Xword) = (0, 100);
+ let result: Result<Elf64FileRange, ElfError> = valid_range.try_into();
+ assert!(result.is_ok());
+ let file_range = result.unwrap();
+ assert_eq!(file_range.offset_begin, 0);
+ assert_eq!(file_range.offset_end, 100);
+
+ // Invalid range (overflow)
+ let invalid_range: (Elf64Off, Elf64Xword) = (usize::MAX as Elf64Off, 100);
+ let result: Result<Elf64FileRange, ElfError> = invalid_range.try_into();
+ assert!(result.is_err());
+ } | Instead of asserting with `is_ok()` or `is_err()` and then unwrapping, the whole thing can be collapsed to a single `assert_eq!()` by destructuring the `Result`. So I would suggest something like the following:
```diff
diff --git a/src/elf/mod.rs b/src/elf/mod.rs
index 80bca2c..3bd3ba3 100644
--- a/src/elf/mod.rs
+++ b/src/elf/mod.rs
@@ -1694,17 +1694,16 @@ mod tests {
#[test]
fn test_elf64_file_range_try_from() {
// Valid range
+ let expected = Elf64FileRange { offset_begin: 0, offset_end: 100 };
let valid_range: (Elf64Off, Elf64Xword) = (0, 100);
let result: Result<Elf64FileRange, ElfError> = valid_range.try_into();
- assert!(result.is_ok());
- let file_range = result.unwrap();
- assert_eq!(file_range.offset_begin, 0);
- assert_eq!(file_range.offset_end, 100);
+ assert_eq!(result, Ok(expected));
// Invalid range (overflow)
+ let expected = ElfError::InvalidFileRange;
let invalid_range: (Elf64Off, Elf64Xword) = (usize::MAX as Elf64Off, 100);
let result: Result<Elf64FileRange, ElfError> = invalid_range.try_into();
- assert!(result.is_err());
+ assert_eq!(result, Err(expected));
}
#[test]
``` |
svsm | github_2023 | others | 112 | coconut-svsm | 00xc | @@ -155,3 +155,46 @@ impl<T: Debug> RWLock<T> {
}
}
}
+
+mod tests {
+
+ #[test]
+ fn test_lock_rw() {
+ use crate::locking::*;
+ let rwlock = RWLock::new(42);
+
+ // Acquire a read lock and check the initial value
+ let read_guard = rwlock.lock_read();
+ assert_eq!(*read_guard, 42);
+
+ drop(read_guard);
+
+ let read_guard2 = rwlock.lock_read();
+ assert_eq!(*read_guard2, 42);
+
+ // Create another RWLock instance for modification
+ let rwlock_modify = RWLock::new(0);
+
+ let mut write_guard = rwlock_modify.lock_write();
+ *write_guard = 99;
+ assert_eq!(*write_guard, 99);
+
+ drop(write_guard);
+
+ let read_guard = rwlock.lock_read();
+ assert_eq!(*read_guard, 42);
+
+ // Let's test two concurrent readers on a new RWLock instance
+ let rwlock_concurrent = RWLock::new(123);
+
+ let read_guard1 = rwlock_concurrent.lock_read();
+ let read_guard2 = rwlock_concurrent.lock_read();
+
+ // Assert that both readers can access the same value (123)
+ assert_eq!(*read_guard1, 123);
+ assert_eq!(*read_guard2, 123);
+
+ drop(read_guard1);
+ drop(read_guard2);
+ }
+} | Just a suggestion, but maybe it would make sense to split this test up. The part about two concurrent readers seems separate. |
svsm | github_2023 | others | 131 | coconut-svsm | Freax13 | @@ -0,0 +1,205 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Carlos López <carlos.lopez@suse.com>
+
+use crate::address::Address;
+use crate::types::PageSize;
+
+/// An abstraction over a memory region, expressed in terms of physical
+/// ([`PhysAddr`](crate::address::PhysAddr)) or virtual
+/// ([`VirtAddr`](crate::address::VirtAddr)) addresses.
+#[derive(Clone, Copy, Debug)]
+pub struct MemoryRegion<A> {
+ start: A,
+ end: A,
+}
+
+impl<A> MemoryRegion<A>
+where
+ A: Address,
+{
+ /// Create a new memory region starting at address `start`, spanning `len`
+ /// bytes.
+ pub fn new(start: A, len: usize) -> Self {
+ let end = A::from(start.bits() + len);
+ Self { start, end }
+ }
+
+ /// Create a new memory region with overflow checks.
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let start = VirtAddr::from(u64::MAX);
+ /// let region = MemoryRegion::checked_new(start, PAGE_SIZE);
+ /// assert!(region.is_none());
+ /// ```
+ pub fn checked_new(start: A, len: usize) -> Option<Self> {
+ let end = start.checked_add(len)?;
+ Some(Self { start, end })
+ }
+
+ /// Create a memory region from two raw addresses.
+ pub const fn from_addresses(start: A, end: A) -> Self {
+ Self { start, end }
+ }
+
+ /// The base address of the memory region, originally set in
+ /// [`MemoryRegion::new()`].
+ #[inline]
+ pub const fn start(&self) -> A {
+ self.start
+ }
+
+ /// The length of the memory region in bytes, originally set in
+ /// [`MemoryRegion::new()`].
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.end.bits().saturating_sub(self.start.bits())
+ }
+
+ /// Returns whether the region spans any actual memory.
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::utils::MemoryRegion;
+ /// let r = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), 0);
+ /// assert!(r.is_empty());
+ /// ```
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// The end address of the memory region.
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let base = VirtAddr::from(0xffffff0000u64);
+ /// let region = MemoryRegion::new(base, PAGE_SIZE);
+ /// assert_eq!(region.end(), VirtAddr::from(0xffffff1000u64));
+ /// ```
+ #[inline]
+ pub const fn end(&self) -> A {
+ self.end
+ }
+
+ /// Checks whether two regions overlap. This does *not* include contiguous
+ /// regions, use [`MemoryRegion::contiguous()`] for that purpose.
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let r1 = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE);
+ /// let r2 = MemoryRegion::new(VirtAddr::from(0xffffff2000u64), PAGE_SIZE);
+ /// assert!(!r1.overlap(&r2));
+ /// ```
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let r1 = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE * 2);
+ /// let r2 = MemoryRegion::new(VirtAddr::from(0xffffff1000u64), PAGE_SIZE);
+ /// assert!(r1.overlap(&r2));
+ /// ```
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// // Contiguous regions do not overlap
+ /// let r1 = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE);
+ /// let r2 = MemoryRegion::new(VirtAddr::from(0xffffff1000u64), PAGE_SIZE);
+ /// assert!(!r1.overlap(&r2));
+ /// ```
+ pub fn overlap(&self, other: &Self) -> bool {
+ self.start() < other.end() && self.end() > other.start()
+ }
+
+ /// Checks whether two regions are contiguous or overlapping. This is a
+ /// less strict check than [`MemoryRegion::overlap()`].
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let r1 = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE);
+ /// let r2 = MemoryRegion::new(VirtAddr::from(0xffffff1000u64), PAGE_SIZE);
+ /// assert!(r1.contiguous(&r2));
+ /// ```
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let r1 = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE);
+ /// let r2 = MemoryRegion::new(VirtAddr::from(0xffffff2000u64), PAGE_SIZE);
+ /// assert!(!r1.contiguous(&r2));
+ /// ```
+ pub fn contiguous(&self, other: &Self) -> bool {
+ self.start() <= other.end() && self.end() >= other.start()
+ }
+
+ /// Merge two regions. It does not check whether the two regions are
+ /// contiguous in the first place, so the resulting region will cover
+ /// any non-overlapping memory between both.
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::PAGE_SIZE;
+ /// # use svsm::utils::MemoryRegion;
+ /// let r1 = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE);
+ /// let r2 = MemoryRegion::new(VirtAddr::from(0xffffff1000u64), PAGE_SIZE);
+ /// let r3 = r1.merge(&r2);
+ /// assert_eq!(r3.start(), r1.start());
+ /// assert_eq!(r3.len(), r1.len() + r2.len());
+ /// assert_eq!(r3.end(), r2.end());
+ /// ```
+ pub fn merge(&self, other: &Self) -> Self {
+ let start = self.start.min(other.start);
+ let end = self.end().max(other.end());
+ Self { start, end }
+ }
+
+ /// Iterate over the addresses covering the memory region in jumps of the
+ /// specified page size.
+ ///
+ /// ```rust
+ /// # use svsm::address::VirtAddr;
+ /// # use svsm::types::{PAGE_SIZE, PageSize};
+ /// # use svsm::utils::MemoryRegion;
+ /// let region = MemoryRegion::new(VirtAddr::from(0xffffff0000u64), PAGE_SIZE * 2);
+ /// let mut iter = region.iter_pages(PageSize::Regular);
+ /// assert_eq!(iter.next(), Some(VirtAddr::from(0xffffff0000u64)));
+ /// assert_eq!(iter.next(), Some(VirtAddr::from(0xffffff1000u64)));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ pub fn iter_pages(&self, size: PageSize) -> impl Iterator<Item = A> { | The addresses returned by this function are not necessarily aligned to the page size. We should probably either fix this or document it. |
svsm | github_2023 | others | 131 | coconut-svsm | Freax13 | @@ -0,0 +1,205 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Carlos López <carlos.lopez@suse.com>
+
+use crate::address::Address;
+use crate::types::PageSize;
+
+/// An abstraction over a memory region, expressed in terms of physical
+/// ([`PhysAddr`](crate::address::PhysAddr)) or virtual
+/// ([`VirtAddr`](crate::address::VirtAddr)) addresses.
+#[derive(Clone, Copy, Debug)]
+pub struct MemoryRegion<A> { | Have you considered creating two type aliases for virtual and physical addresses? This could be a bit easier to read.
```rust
pub type PhysMemoryRegion = MemoryRegion<PhysAddr>;
pub type VirtMemoryRegion = MemoryRegion<VirtAddr>;
``` |
svsm | github_2023 | others | 131 | coconut-svsm | Freax13 | @@ -154,14 +155,17 @@ impl<'a> FwCfg<'a> {
}
fn read_memory_region(&self) -> MemoryRegion {
- let start: u64 = self.read_le();
- let size: u64 = self.read_le();
- let end = start.saturating_add(size);
+ let start = PhysAddr::from(self.read_le::<u64>());
+ let size = self.read_le::<u64>();
+ let end = start.saturating_add(size as usize); | Did some of the changes in this file end up in the wrong commit? I don't see to connection to changing the return type of `max_phys_addr`. |
svsm | github_2023 | others | 131 | coconut-svsm | Freax13 | @@ -55,7 +55,7 @@ pub trait Address:
self.is_aligned(PAGE_SIZE)
}
- fn checked_offset(&self, off: InnerAddr) -> Option<Self> {
+ fn checked_add(&self, off: InnerAddr) -> Option<Self> { | FWIW this also matches the function names of pointers in the standard library: [`pointer::add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.add) takes a `usize` (like the function here), whereas [`pointer::offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset) takes an `isize`. |
svsm | github_2023 | others | 131 | coconut-svsm | Freax13 | @@ -40,37 +39,31 @@ pub fn init_memory_map(fwcfg: &FwCfg, launch_info: &KernelLaunchInfo) -> Result<
regions.remove(i);
// 2. Insert a region up until the start of SVSM memory (if non-empty).
- let region_before_start = region.start;
- let region_before_end = launch_info.kernel_region_phys_start;
+ let region_before_start = region.start();
+ let region_before_end = kernel_region.start();
if region_before_start < region_before_end {
regions.insert(
i,
- MemoryRegion {
- start: region_before_start,
- end: region_before_end,
- },
+ MemoryRegion::from_addresses(region_before_start, region_before_end),
);
i += 1;
}
// 3. Insert a region up after the end of SVSM memory (if non-empty).
- let region_after_start = launch_info.kernel_region_phys_end;
- let region_after_end = region.end;
+ let region_after_start = kernel_region.end();
+ let region_after_end = region.end();
if region_after_start < region_after_end {
regions.insert(
i,
- MemoryRegion {
- start: region_after_start,
- end: region_after_end,
- },
+ MemoryRegion::from_addresses(region_after_start, region_after_end),
);
i += 1;
}
}
log::info!("Guest Memory Regions:");
for r in regions.iter() {
- log::info!(" {:018x}-{:018x}", r.start, r.end);
+ log::info!(" {:018x}-{:018x}", r.start(), r.end()); | Should we maybe just implement `Debug` or `Display` for `MemoryRange`? |
svsm | github_2023 | others | 131 | coconut-svsm | Freax13 | @@ -157,8 +157,8 @@ pub extern "C" fn stage2_main(launch_info: &Stage1LaunchInfo) {
log::info!("COCONUT Secure Virtual Machine Service Module (SVSM) Stage 2 Loader");
- let kernel_region_phys_start = PhysAddr::from(r.start);
- let kernel_region_phys_end = PhysAddr::from(r.end);
+ let kernel_region_phys_start = r.start();
+ let kernel_region_phys_end = r.end(); | Maybe outside of the scope of this pr, but it seems to me that maybe `init_valid_bitmap_alloc` should take a `MemoryRange`. |
svsm | github_2023 | others | 138 | coconut-svsm | 00xc | @@ -9,79 +9,165 @@ use core::fmt::Debug;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicU64, Ordering};
+/// A guard that provides read access to the data protected by [`RWLock`]
#[derive(Debug)]
#[must_use = "if unused the RWLock will immediately unlock"]
pub struct ReadLockGuard<'a, T: Debug> {
+ /// Reference to the associated `AtomicU64` in the [`RWLock`]
rwlock: &'a AtomicU64,
+ /// Reference to the protected data
data: &'a T,
}
+/// Implements the behavior of the [`ReadLockGuard`] when it is dropped
impl<'a, T: Debug> Drop for ReadLockGuard<'a, T> {
+ /// Release the read lock
fn drop(&mut self) {
self.rwlock.fetch_sub(1, Ordering::Release);
}
}
+/// Implements the behavior of dereferencing the [`ReadLockGuard`] to
+/// access the protected data.
impl<'a, T: Debug> Deref for ReadLockGuard<'a, T> {
type Target = T;
+ /// Allow reading the protected data through deref
fn deref(&self) -> &T {
self.data
}
}
+/// A guard that provides exclusive write access to the data protected by [`RWLock`]
#[derive(Debug)]
#[must_use = "if unused the RWLock will immediately unlock"]
pub struct WriteLockGuard<'a, T: Debug> {
+ /// Reference to the associated `AtomicU64` in the [`RWLock`]
rwlock: &'a AtomicU64,
+ /// Reference to the protected data (mutable)
data: &'a mut T,
}
+/// Implements the behavior of the [`WriteLockGuard`] when it is dropped
impl<'a, T: Debug> Drop for WriteLockGuard<'a, T> {
fn drop(&mut self) {
// There are no readers - safe to just set lock to 0
self.rwlock.store(0, Ordering::Release);
}
}
+/// Implements the behavior of dereferencing the [`WriteLockGuard`] to
+/// access the protected data.
impl<'a, T: Debug> Deref for WriteLockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
self.data
}
}
+/// Implements the behavior of dereferencing the [`WriteLockGuard`] to
+/// access the protected data in a mutable way.
impl<'a, T: Debug> DerefMut for WriteLockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
+/// A simple Read-Write Lock (RWLock) that allows multiple readers or
+/// one exclusive writer.
#[derive(Debug)]
pub struct RWLock<T: Debug> {
+ /// An atomic 64-bit integer used for synchronization
rwlock: AtomicU64,
+ /// An UnsafeCell for interior mutability
data: UnsafeCell<T>,
}
+/// Implements the trait `Sync` for the [`RWLock`], allowing safe
+/// concurrent access across threads.
unsafe impl<T: Debug> Sync for RWLock<T> {}
+/// Splits a 64-bit value into two parts: readers (low 32 bits) and
+/// writers (high 32 bits).
+///
+/// # Parameters
+///
+/// - `val`: A 64-bit unsigned integer value to be split.
+///
+/// # Returns
+///
+/// A tuple containing two 32-bit unsigned integer values. The first
+/// element of the tuple is the lower 32 bits of input value, and the
+/// second is the upper 32 bits.
+///
#[inline]
fn split_val(val: u64) -> (u64, u64) {
(val & 0xffff_ffffu64, val >> 32)
}
+/// Composes a 64-bit value by combining the number of readers (low 32
+/// bits) and writers (high 32 bits). This function is used to create a
+/// 64-bit synchronization value that represents the current state of the
+/// RWLock, including the count of readers and writers.
+///
+/// # Parameters
+///
+/// - `readers`: The number of readers (low 32 bits) currently holding read locks.
+/// - `writers`: The number of writers (high 32 bits) currently holding write locks.
+///
+/// # Returns
+///
+/// A 64-bit value representing the combined state of readers and writers in the RWLock.
+///
#[inline]
fn compose_val(readers: u64, writers: u64) -> u64 {
(readers & 0xffff_ffffu64) | (writers << 32)
}
+/// A reader-writer lock that allows multiple readers or a single writer
+/// to access the protected data. [`RWLock`] provides exclusive access for
+/// writers and shared access for readers, for efficient synchronization.
+///
impl<T: Debug> RWLock<T> {
+ /// Creates a new [`RWLock`] instance with the provided initial data.
+ ///
+ /// # Parameters
+ ///
+ /// - `data`: The initial data to be protected by the [`RWLock`].
+ ///
+ /// # Returns
+ ///
+ /// A new [`RWLock`] instance with the specified initial data.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// use core::sync::atomic::{AtomicU64, Ordering};
+ /// use std::fmt::Debug;
+ /// use svsm::locking::*; | These imports are not needed.
```suggestion
/// use svsm::locking::RWLock;
``` |
svsm | github_2023 | others | 138 | coconut-svsm | 00xc | @@ -9,43 +9,107 @@ use core::fmt::Debug;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicU64, Ordering};
+/// A lock guard used in conjunction with [`SpinLock`]. This lock guard | I'd make it explicit that one obtains this type from a `SpinLock`:
```suggestion
/// A lock guard obtained from a [`SpinLock`]. This lock guard
``` |
svsm | github_2023 | others | 138 | coconut-svsm | 00xc | @@ -9,43 +9,107 @@ use core::fmt::Debug;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicU64, Ordering};
+/// A lock guard used in conjunction with [`SpinLock`]. This lock guard
+/// provides exclusive access to the data protected by a [`SpinLock`],
+/// ensuring that the lock is released when it goes out of scope.
+///
+/// # Examples
+///
+/// ```
+/// use svsm::locking::{SpinLock, LockGuard}; | ```suggestion
/// use svsm::locking::SpinLock;
``` |
svsm | github_2023 | others | 138 | coconut-svsm | 00xc | @@ -9,43 +9,107 @@ use core::fmt::Debug;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicU64, Ordering};
+/// A lock guard used in conjunction with [`SpinLock`]. This lock guard
+/// provides exclusive access to the data protected by a [`SpinLock`],
+/// ensuring that the lock is released when it goes out of scope.
+///
+/// # Examples
+///
+/// ```
+/// use svsm::locking::{SpinLock, LockGuard};
+///
+/// let data = 42;
+/// let spin_lock = SpinLock::new(data);
+///
+/// {
+/// let mut guard = spin_lock.lock();
+/// *guard += 1; // Modify the protected data.
+/// }; // Lock is automatically released when `guard` goes out of scope.
+/// ```
#[derive(Debug)]
#[must_use = "if unused the SpinLock will immediately unlock"]
pub struct LockGuard<'a, T: Debug> {
holder: &'a AtomicU64,
data: &'a mut T,
}
+/// Implements the behavior of the [`LockGuard`] when it is dropped
impl<'a, T: Debug> Drop for LockGuard<'a, T> {
+ /// Automatically releases the lock when the guard is dropped
fn drop(&mut self) {
self.holder.fetch_add(1, Ordering::Release);
}
}
+/// Implements the behavior of dereferencing the [`LockGuard`] to
+/// access the protected data.
impl<'a, T: Debug> Deref for LockGuard<'a, T> {
type Target = T;
+ /// Provides read-only access to the protected data
fn deref(&self) -> &T {
self.data
}
}
+/// Implements the behavior of dereferencing the [`LockGuard`] to
+/// access the protected data in a mutable way.
impl<'a, T: Debug> DerefMut for LockGuard<'a, T> {
+ /// Provides mutable access to the protected data
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
+/// A simple spinlock implementation for protecting concurrent data access.
+///
+/// # Examples
+///
+/// ```
+/// use svsm::locking::SpinLock;
+///
+/// let data = 42;
+/// let spin_lock = SpinLock::new(data);
+///
+/// // Acquire the lock and modify the protected data.
+/// {
+/// let mut guard = spin_lock.lock();
+/// *guard += 1;
+/// }; // Lock is automatically released when `guard` goes out of scope.
+///
+/// // Try to acquire the lock without blocking
+/// if let Some(mut guard) = spin_lock.try_lock() {
+/// *guard += 2;
+/// };
+/// ```
#[derive(Debug)]
pub struct SpinLock<T: Debug> {
+ /// This atomic counter is incremented each time a thread attempts to
+ /// acquire the lock. It helps to determine the order in which threads
+ /// acquire the lock.
current: AtomicU64,
+ /// This counter represents the thread that currently holds the lock
+ /// and has access to the protected data.
holder: AtomicU64,
+ /// This `UnsafeCell` is used to provide interior mutability of the
+ /// protected data, this is, it allows the data to be accessed/modified
+ /// while respecting the locking mechanism. | ```suggestion
/// protected data. That is, it allows the data to be accessed/modified
/// while enforcing the locking mechanism.
``` |
svsm | github_2023 | others | 141 | coconut-svsm | joergroedel | @@ -48,3 +48,51 @@ before allowing them to be committed. It can be installed by running
```
from the projects root directory.
+
+Documentation Style
+-------------------
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks.
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple.
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise.
+
+- We can't have a section "Panics" for every place the SVSM may panic, but
+ they should be included if your code checks assertions or uses the
+ `unwrap()` method.
+
+- Remember that if you update code, you also have to update its related
+ documentation to ensure maintainability.
+
+- Be aware that your documentation comments have the potential to break the
+ documentation generation process, which can delay the merging of your
+ changes. Your new documentation should be warning-free. | Can you please add examples for best practices here? It makes it much easier for contributors. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -58,7 +58,8 @@ Contributing
Contributing to the project is as easy as sending a pull-request via
GitHub. For detailed instructions on patch formatting and contribution
-guidelines please have a look at [CONTRIBUTING.md](CONTRIBUTING.md).
+guidelines please have a look at [CONTRIBUTING.md](CONTRIBUTING.md) and
+for documentation guidelines to [DOC-GUIDELINES.md](Documentation/DOC-GUIDELINES.md). | ```suggestion
guidelines please have a look at [CONTRIBUTING.md](CONTRIBUTING.md).
For documentation guidelines consult [DOC-GUIDELINES.md](Documentation/DOC-GUIDELINES.md).
``` |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,109 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+```rust
+/// This function does A, returns B, keep in mind C
+fn main() {
+ // Some code here
+}
+
+``` | This example does not really match the point it follows. I'd suggest moving this point below the next one ("use triple slashes..."), and have this example exemplify both points. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,109 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+```rust
+/// This function does A, returns B, keep in mind C
+fn main() {
+ // Some code here
+}
+
+```
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized
+///
+``` | This example would benefit from a `pub fn...` at the end |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,109 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+```rust
+/// This function does A, returns B, keep in mind C
+fn main() {
+ // Some code here
+}
+
+```
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized
+///
+```
+- We can't have a section "Panics" for every place the SVSM may panic, but
+ they should be included if your code checks assertions or uses the
+ `unwrap()` method. For instance:
+
+```rust
+/// # Panics
+///
+/// This function does not panic under normal circumstances. However, if
+/// the length `len` is greater than the allocated memory's actual capacity,
+/// it will panic.
+``` | Same thing, I would make it explicit that this goes before a function declaration. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,109 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+```rust
+/// This function does A, returns B, keep in mind C
+fn main() {
+ // Some code here
+}
+
+```
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized
+///
+```
+- We can't have a section "Panics" for every place the SVSM may panic, but
+ they should be included if your code checks assertions or uses the
+ `unwrap()` method. For instance:
+
+```rust
+/// # Panics
+///
+/// This function does not panic under normal circumstances. However, if
+/// the length `len` is greater than the allocated memory's actual capacity,
+/// it will panic.
+```
+
+- Remember that if you update code, you also have to update its related
+ documentation to ensure maintainability.
+
+- Be aware that your documentation comments have the potential to break the
+ documentation generation process, which can delay the merging of your
+ changes. Your new documentation should be warning-free. | I would mention `cargo doc` here. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+ | ```suggestion
/// This function does A, takes parameter of type [`M`].
/// It returns [`B`], keep in mind C
fn main(a: M) -> B {
// Some code here
}
```
Note there is also an extra newline that we do not need. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+
+```
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory. | ```suggestion
/// 1. `src` and `dst` must point to valid memory.
``` |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+
+```
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`. | ```suggestion
/// 2. The length `len` must accurately represent the number of bytes in
/// `data`.
``` |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+
+```
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized | ```suggestion
/// 3. `src` must be correctly initialized.
``` |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+
+```
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized
+///
+pub unsafe fn example_memcpy<T>(dest: *mut T, src: *const T, len: usize) {
+ // Ensure the pointers are not null
+ assert!(!dest.is_null() && !src.is_null()); | The function should have a `# Panics` section in this case. We can even merge this example with the one below. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+
+```
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+  said code will be built and run during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, especially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized
+///
+pub unsafe fn example_memcpy<T>(dest: *mut T, src: *const T, len: usize) {
+ // Ensure the pointers are not null
+ assert!(!dest.is_null() && !src.is_null());
+ let mut rcx: usize;
+
+ unsafe { | The function is already unsafe, so there is no need for this unsafe block as far as I know. |
svsm | github_2023 | others | 141 | coconut-svsm | 00xc | @@ -0,0 +1,129 @@
+Documentation Style
+===================
+
+In this project, code documentation is generated using Rustdoc, which
+automatically generates interactive web documentation. Here are some
+guidelines for documenting code effectively:
+
+- Follow [Rust's official indications.](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html)
+
+- Follow standard Markdown format, e.g. variables between backticks:
+
+- When adding doc comments to your code, use triple slashes (`///`)
+ to document items; if you also want to document modules or crates, use
+ `//!` and `#[doc = ""]` for documenting fields or expressions.
+
+```rust
+/// This function does A, takes parameter of type `m`
+/// It returns B, keep in mind C
+fn main(a: m) {
+ // Some code here
+}
+
+```
+
+- Documenting trait implementations is optional since the generated
+ Rust core library already documents them. The exception would be if your
+ implementation does something counterintuitive to the trait's general
+ definition.
+
+- When mentioning a type (e.g. \`RWLock\`, \`WriteLockGuard\`) it's good to
+ add a link to the type with square brackets (e.g. [\`RWLock\`],
+ [\`WriteLockGuard\`]).
+
+- When documenting a function, examples of usage relying on code blocks
+ can help understand how to use your code. However, keep in mind that
+ said code will be built and ran during tests, so it also needs to be
+ maintained -- keep it simple. Here is an example of function
+ documentation with Arguments, Returns and Examples:
+
+```rust
+
+/// Compares two [`Elf64AddrRange`] instances for partial ordering. It returns
+/// [`Some<Ordering>`] if there is a partial order, and [`None`] if there is no
+/// order (i.e., if the ranges overlap without being equal).
+///
+/// # Arguments
+///
+/// * `other` - The other [`Elf64AddrRange`] to compare to.
+///
+/// # Returns
+///
+/// - [`Some<Ordering::Less>`] if [`self`] is less than `other`.
+/// - [`Some<Ordering::Greater>`] if [`self`] is greater than `other`.
+/// - [`Some<Ordering::Equal>`] if [`self`] is equal to `other`.
+/// - [`None`] if there is no partial order (i.e., ranges overlap but are not equal).
+///
+/// # Examples
+///
+/// ```rust
+/// use svsm::elf::Elf64AddrRange;
+/// use core::cmp::Ordering;
+///
+/// let range1 = Elf64AddrRange { vaddr_begin: 0x1000, vaddr_end: 0x1100 };
+/// let range2 = Elf64AddrRange { vaddr_begin: 0x1100, vaddr_end: 0x1200 };
+///
+/// assert_eq!(range1.partial_cmp(&range2), Some(Ordering::Less));
+/// ```
+impl cmp::PartialOrd for Elf64AddrRange {
+ fn partial_cmp(&self, other: &Elf64AddrRange) -> Option<cmp::Ordering> {
+ //(...)
+```
+
+- Add section "Safety" if necessary to clarify what is unsafe, specially in
+ public (`pub`) interfaces, when using `unsafe` blocks or in cases where
+ undefined behavior may arise. For example:
+
+```rust
+/// # Safety
+///
+/// This function is marked as `unsafe` because it uses unsafe assembly.
+/// It is the responsibility of the caller to ensure the following:
+///
+/// 1. The pointer `data` must be valid and properly allocated memory.
+/// 2. The length `len` must accurately represent the number of elements in
+/// `data`.
+/// 3. The caller must also ensure that the memory is correctly initialized
+///
+pub unsafe fn example_memcpy<T>(dest: *mut T, src: *const T, len: usize) {
+ // Ensure the pointers are not null
+ assert!(!dest.is_null() && !src.is_null());
+ let mut rcx: usize;
+
+ unsafe {
+ asm!(
+ "rep movsb"
+ : "={rcx}"(rcx)
+ : "0"(len), "D"(dest), "S"(src)
+ : "memory"
+ );
+ }
+}
+```
+- We can't have a section "Panic" for every place the SVSM may panic, but
+ they should be included if your code checks assertions or uses the
+ `unwrap()` method. For instance:
+
+```rust
+/// # Panics
+///
+/// This function does not panic under normal circumstances. However, if
+/// the length `len` is greater than the allocated memory's actual capacity,
+/// it will panic. | I would simply say:
```suggestion
/// The function will panic if the provided length exceeds the buffer's capacity.
``` |
svsm | github_2023 | others | 145 | coconut-svsm | Freax13 | @@ -0,0 +1,199 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2023 SUSE LLC
+//
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+extern crate alloc;
+
+use core::slice::from_raw_parts_mut;
+
+use alloc::sync::Arc;
+
+use super::{Mapping, RawAllocMapping, VMPageFaultResolution, VMPhysMem, VirtualMapping};
+use crate::address::Address;
+use crate::error::SvsmError;
+use crate::fs::FileHandle;
+use crate::mm::vm::VMR;
+use crate::mm::{pagetable::PageTable, PAGE_SIZE};
+
+#[derive(Debug)]
+struct VMWriteFileMapping(RawAllocMapping);
+
+impl VMWriteFileMapping {
+ pub fn get_alloc(&self) -> &RawAllocMapping {
+ &self.0
+ }
+
+ pub fn get_alloc_mut(&mut self) -> &mut RawAllocMapping {
+ &mut self.0
+ }
+}
+
+impl VirtualMapping for VMWriteFileMapping {
+ fn mapping_size(&self) -> usize {
+ self.0.mapping_size()
+ }
+
+ fn map(&self, offset: usize) -> Option<crate::address::PhysAddr> {
+ self.0.map(offset)
+ }
+
+ fn pt_flags(&self, _offset: usize) -> crate::mm::pagetable::PTEntryFlags {
+ PageTable::task_data_flags()
+ }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum VMFileMappingPermission {
+ /// Read-only access to the file
+ Read,
+ // Read/Write access to a copy of the files pages
+ Write,
+ // Read-only access that allows execution
+ Execute,
+}
+
+/// Map view of a ramfs file into virtual memory
+#[derive(Debug)]
+pub struct VMFileMapping {
+ /// The file that this mapping relates to
+ file: FileHandle,
+
+ /// The page aligned offset into the file that the mappings starts from
+ offset: usize,
+
+ /// The size of the mapping in bytes
+ size: usize,
+
+ /// The permission to apply to the virtual mapping
+ permission: VMFileMappingPermission,
+
+ /// A copy of the file pages for mappings with Write permission
+ write_copy: Option<VMWriteFileMapping>,
+}
+
+impl VMFileMapping {
+ /// Create a new ['VMFileMapping'] for a file. The file provides the backing
+ /// pages for the file contents.
+ ///
+ /// # Arguments
+ ///
+ /// * 'file' - The file to create the mapping for. This instance keeps a
+ /// reference to the file until it is dropped.
+ ///
+ /// * 'offset' - The offset from the start of the file to map. This must be
+ /// align to PAGE_SIZE.
+ ///
+ /// * 'size' - The number of bytes to map starting from the offset. This
+ /// must be a multiple of PAGE_SIZE.
+ ///
+ /// # Returns
+ ///
+ /// Initialized mapping on success, Err(SvsmError::Mem) on error
+ pub fn new(
+ file: FileHandle,
+ offset: usize,
+ size: usize,
+ permission: VMFileMappingPermission,
+ ) -> Result<Self, SvsmError> {
+ if (offset & (PAGE_SIZE - 1)) != 0 || (size & (PAGE_SIZE - 1)) != 0 {
+ return Err(SvsmError::Mem);
+ }
+ if (offset + size) > file.size() {
+ return Err(SvsmError::Mem);
+ }
+
+ // For ranges with write access we need to take a copy of the ram pages
+ // to allow them to be written to without modifying the contents of the
+ // file itself and also to prevent pointer aliasing with any other
+ // FileHandles that may be open on the same file.
+ let write_copy = if permission == VMFileMappingPermission::Write {
+ Some(VMWriteFileMapping(RawAllocMapping::new(size)))
+ } else {
+ None
+ };
+
+ Ok(Self {
+ file,
+ offset,
+ size,
+ permission,
+ write_copy,
+ })
+ }
+}
+
+impl VirtualMapping for VMFileMapping {
+ fn mapping_size(&self) -> usize {
+ self.size
+ }
+
+ fn map(&self, offset: usize) -> Option<crate::address::PhysAddr> {
+ if let Some(write_copy) = &self.write_copy {
+ let write_addr = write_copy.map(offset);
+ if write_addr.is_some() {
+ write_addr
+ } else {
+ self.file.mapping(self.offset + offset)
+ }
+ } else {
+ self.file.mapping(self.offset + offset)
+ }
+ } | AFAICT this function and none of the function it calling end up calling `get_file_page`, so there reference count for the page is never increased. Am I missing something here? |
svsm | github_2023 | others | 145 | coconut-svsm | Freax13 | @@ -86,4 +106,22 @@ impl RawAllocMapping {
pub fn unmap(&self, _offset: usize) {
// Nothing to do for now
}
+
+ /// Check if a page has been allocated
+ ///
+ /// # Arguments
+ ///
+ /// * 'offset' - Byte offset into the mapping
+ ///
+ /// # Returns
+ ///
+ /// 'true' if the page containing the offset has been allocated
+ /// otherwise 'false'.
+ pub fn present(&self, offset: usize) -> bool {
+ let pfn = offset >> PAGE_SHIFT;
+ self.pages
+ .get(pfn)
+ .and_then(|r| r.as_ref().map(|_| ()))
+ .is_some() | ```suggestion
self.pages.get(pfn).and_then(|r| r.as_ref()).is_some()
``` |
svsm | github_2023 | others | 148 | coconut-svsm | vsntk18 | @@ -0,0 +1,154 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2023 SUSE LLC
+//
+// Author: Carlos López <carlos.lopez@suse.com>
+
+#![no_main]
+
+use arbitrary::Arbitrary;
+use core::alloc::{GlobalAlloc, Layout, LayoutError};
+use core::num::NonZeroUsize;
+use libfuzzer_sys::fuzz_target;
+use svsm::mm::alloc::{SvsmAllocator, TestRootMem};
+
+const MIN_ROOT_MEM_SIZE: usize = 0x8000;
+const MAX_ROOT_MEM_SIZE: usize = 0x100000;
+
+#[inline]
+fn adjust_mem_size(size: usize) -> usize {
+ MIN_ROOT_MEM_SIZE + (size % (MAX_ROOT_MEM_SIZE - MIN_ROOT_MEM_SIZE + 1))
+}
+
+#[derive(Arbitrary, Debug)]
+struct FuzzLayout {
+ size: usize,
+ align: usize,
+}
+
+impl TryFrom<FuzzLayout> for Layout {
+ type Error = LayoutError;
+
+ fn try_from(ly: FuzzLayout) -> Result<Self, Self::Error> {
+ Self::from_size_align(ly.size, ly.align)
+ }
+}
+
+/// A wrapper around SvsmAllocator that marks memory as initialized or
+/// uninitialized on allocation and deallocation respectively.
+struct PoisonAllocator {
+ heap: SvsmAllocator,
+}
+
+impl PoisonAllocator {
+ const POISON_BYTE: u8 = 0xf7;
+ const WRITE_BYTE: u8 = 0x8;
+
+ fn new() -> Self {
+ Self {
+ heap: SvsmAllocator::new(),
+ }
+ }
+
+ unsafe fn unpoison_mem(&self, ptr: *mut u8, size: usize) {
+ ptr.write_bytes(Self::WRITE_BYTE, size);
+ }
+
+ unsafe fn poison_mem(&self, ptr: *mut u8, size: usize) {
+ ptr.write_bytes(Self::POISON_BYTE, size);
+ }
+
+ unsafe fn check_mem(&self, ptr: *mut u8, size: usize) {
+ for i in 0..size {
+ assert_eq!(ptr.add(i).read_volatile(), Self::WRITE_BYTE);
+ }
+ }
+
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ let ptr = self.heap.alloc(layout);
+ if !ptr.is_null() {
+ self.unpoison_mem(ptr, layout.size());
+ }
+ ptr
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ self.check_mem(ptr, layout.size());
+ self.poison_mem(ptr, layout.size());
+ self.heap.dealloc(ptr, layout);
+ }
+
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_layout: Layout) -> *mut u8 {
+ self.check_mem(ptr, layout.size());
+ self.poison_mem(ptr, layout.size());
+ let ptr = self.heap.realloc(ptr, layout, new_layout.size());
+ if !ptr.is_null() {
+ self.unpoison_mem(ptr, new_layout.size());
+ }
+ ptr
+ }
+}
+
+#[derive(Arbitrary, Debug)]
+enum Action {
+ Alloc(FuzzLayout),
+ Free(usize),
+ Realloc(usize, NonZeroUsize),
+ Read(usize),
+}
+
+#[derive(Arbitrary, Debug)]
+struct FuzzInput {
+ root_mem_size: usize,
+ actions: Vec<Action>,
+}
+
+fuzz_target!(|inp: FuzzInput| {
+ let _mem = TestRootMem::setup(adjust_mem_size(inp.root_mem_size));
+ let heap = PoisonAllocator::new();
+ let mut ptrs = Vec::new();
+
+ for action in inp.actions.into_iter() {
+ match action {
+ Action::Alloc(layout) => {
+ let Ok(layout) = Layout::try_from(layout) else {
+ continue;
+ };
+ let ptr = unsafe { heap.alloc(layout) };
+ if !ptr.is_null() {
+ ptrs.push((ptr, layout));
+ }
+ }
+ Action::Free(idx) => {
+ if let Some(idx) = idx.checked_rem(ptrs.len()) {
+ let (ptr, layout) = ptrs.swap_remove(idx);
+ unsafe { heap.dealloc(ptr, layout) };
+ }
+ }
+ Action::Read(idx) => {
+ if let Some(idx) = idx.checked_rem(ptrs.len()) {
+ let (ptr, layout) = ptrs[idx];
+ unsafe { heap.check_mem(ptr, layout.size()) };
+ };
+ }
+ Action::Realloc(idx, new_size) => {
+ let Some(idx) = idx.checked_rem(ptrs.len()) else {
+ continue;
+ };
+
+ // Try to get the new layout. Alignment must be the same.
+ let new_size = new_size.get();
+ let (ptr, layout) = ptrs.swap_remove(idx);
+ let Ok(new_layout) = Layout::from_size_align(new_size, layout.align()) else {
+ ptrs.push((ptr, layout));
+ continue;
+ };
+
+ let ptr = unsafe { heap.realloc(ptr, layout, new_layout) };
+ if !ptr.is_null() {
+ ptrs.push((ptr, new_layout));
+ }
+ }
+ }
+ } | Did you forget to free the memory chunks that might still be left in the _ptrs_ vector after exiting for loop? |
svsm | github_2023 | others | 148 | coconut-svsm | vsntk18 | @@ -0,0 +1,164 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2023 SUSE LLC
+//
+// Author: Carlos López <carlos.lopez@suse.com>
+
+#![no_main]
+
+use arbitrary::Arbitrary;
+use libfuzzer_sys::fuzz_target;
+use std::collections::BTreeSet;
+use svsm::address::VirtAddr;
+use svsm::mm::alloc::{
+ allocate_file_page, allocate_file_page_ref, allocate_page, allocate_pages, allocate_slab_page,
+ allocate_zeroed_page, free_page, get_order, TestRootMem,
+};
+use svsm::types::PAGE_SIZE;
+
+const WRITE_BYTE: u8 = 0x66;
+const POISON_BYTE: u8 = 0xfa;
+const MIN_ROOT_MEM_SIZE: usize = 0x80000;
+const MAX_ROOT_MEM_SIZE: usize = 0x100000;
+
+#[derive(Debug, Arbitrary)]
+struct FuzzInput {
+ root_mem_size: usize,
+ actions: Vec<Action>,
+}
+
+/// Actions during a fuzzing run
+#[derive(Debug, Arbitrary)]
+enum Action {
+ /// Allocate a regular page
+ Allocate,
+ /// Allocate a slab page
+ AllocateSlab,
+ /// Allocate pages of higher order
+ AllocatePages(usize),
+ /// Allocate a zeroed page
+ AllocateZeroed,
+ /// Allocate a file page
+ AllocateFile,
+ /// Write data to an allocated page
+ WritePage(usize),
+ /// Read data from an allocated & initialized page
+ ReadPage(usize),
+ /// Free an allocated page
+ Free(usize),
+ /// Allocate a page ref
+ AllocateFilePageRef,
+ /// Clone a page ref, increasing its refcount
+ CloneFilePageRef(usize),
+ /// Drop a page ref, decreasing its refcount
+ DropFilePageRef(usize),
+}
+
+#[inline]
+fn get_idx<T>(v: &[T], idx: usize) -> Option<usize> {
+ idx.checked_rem(v.len())
+}
+
+#[inline]
+fn get_item<T>(v: &[T], idx: usize) -> Option<&T> {
+ let idx = get_idx(v, idx)?;
+ Some(unsafe { v.get_unchecked(idx) })
+}
+
+#[inline]
+unsafe fn fill_page(page: VirtAddr, byte: u8) {
+ page.as_mut_ptr::<u8>().write_bytes(byte, PAGE_SIZE)
+}
+
+#[inline]
+fn adjust_mem_size(size: usize) -> usize {
+ MIN_ROOT_MEM_SIZE + (size % (MAX_ROOT_MEM_SIZE - MIN_ROOT_MEM_SIZE + 1))
+}
+
+fuzz_target!(|inp: FuzzInput| {
+ let _mem = TestRootMem::setup(adjust_mem_size(inp.root_mem_size));
+
+ // Regular pages
+ let mut pages = Vec::new();
+ // Initialized regular pages
+ let mut inited = BTreeSet::new();
+ // Page refs
+ let mut pagerefs = Vec::new();
+
+ for action in inp.actions.into_iter() {
+ match action {
+ Action::Allocate => {
+ if let Ok(page) = allocate_page() {
+ pages.push(page);
+ }
+ }
+ Action::AllocateSlab => {
+ if let Ok(page) = allocate_slab_page() {
+ pages.push(page);
+ }
+ }
+ Action::AllocatePages(size) => {
+ if let Ok(page) = allocate_pages(get_order(size)) {
+ pages.push(page);
+ }
+ }
+ Action::AllocateZeroed => {
+ if let Ok(page) = allocate_zeroed_page() {
+ pages.push(page);
+ inited.insert(page);
+ }
+ }
+ Action::AllocateFile => { | _get_file_page_() and _put_file_page()_ can also be called for a wider coverage? |
svsm | github_2023 | others | 153 | coconut-svsm | 00xc | @@ -46,3 +46,46 @@ pub fn zero_mem_region(start: VirtAddr, end: VirtAddr) {
// Zero region
unsafe { start.as_mut_ptr::<u8>().write_bytes(0, size) }
}
+
+#[cfg(test)]
+mod tests {
+
+ use crate::utils::util::*;
+
+ #[test]
+ fn test_mem_utils() {
+ // Align up
+ assert_eq!(align_up(7, 4), 8);
+ assert_eq!(align_up(15, 8), 16);
+ assert_eq!(align_up(10, 2), 10);
+ // Align down
+ assert_eq!(align_down(7, 4), 4);
+ assert_eq!(align_down(15, 8), 8);
+ assert_eq!(align_down(10, 2), 10);
+ // Page align up
+ assert_eq!(page_align_up(4096), 4096);
+ assert_eq!(page_align_up(4097), 8192);
+ assert_eq!(page_align_up(0), 0);
+ // Page offset
+ assert_eq!(page_offset(4096), 0);
+ assert_eq!(page_offset(4097), 1);
+ assert_eq!(page_offset(0), 0);
+ // Overlaps
+ assert!(overlap(1, 5, 3, 6));
+ assert!(overlap(0, 10, 5, 15));
+ assert!(!overlap(1, 5, 6, 8));
+ }
+
+ #[test]
+ fn test_zero_mem_region() {
+ let mut data: [u8; 10] = [1; 10];
+ let start = VirtAddr::new(&mut data[0] as *mut u8 as usize);
+ let end = start + 10; | ```suggestion
let start = VirtAddr::from(data.as_mut_ptr());
let end = start + core::mem::size_of_val(&data);
``` |
svsm | github_2023 | others | 136 | coconut-svsm | joergroedel | @@ -183,12 +170,12 @@ pub struct TaskContext {
pub struct Task {
pub rsp: u64,
- /// Information about the task stack
- pub stack: TaskStack,
-
/// Page table that is loaded when the task is scheduled
pub page_table: SpinLock<PageTableRef>,
+ /// Task virtual memory range
+ vm_range: VMR, | This needs a more specific name, as it covers the kernel VMR. We will get another VMR for user-space in the same structure. Maybe `vm_kernel_range`? |
svsm | github_2023 | others | 136 | coconut-svsm | joergroedel | @@ -171,6 +171,29 @@ impl GuestVmsaRef {
}
}
+/// Insert a mapping into the current PerCpu range and manage its lifecycle
+#[derive(Debug)]
+pub struct PerCpuMapping(VirtAddr);
+
+impl PerCpuMapping {
+ pub fn new(mapping: Arc<Mapping>) -> Result<Self, SvsmError> {
+ this_cpu_mut().vm_range.insert(mapping).map(Self)
+ }
+
+ pub fn virt_addr(&self) -> VirtAddr {
+ self.0
+ }
+}
+
+impl Drop for PerCpuMapping {
+ fn drop(&mut self) {
+ this_cpu_mut()
+ .vm_range
+ .remove(self.0)
+ .expect("Failed to remove PerCpuMapping managed range");
+ }
+} | Maybe we can look into making this more generic to work with any VMR instance. This requires taking a reference to the VMR within the struct and lifetime annotations. But this is just a note, no requirement for this PR. |
svsm | github_2023 | others | 120 | coconut-svsm | roy-hopkins | @@ -9,7 +9,17 @@ TARGET_PATH="debug"
endif
STAGE2_ELF = "target/x86_64-unknown-none/${TARGET_PATH}/stage2"
+ifdef TEST
+KERNEL_ELF = "target/x86_64-unknown-none/${TARGET_PATH}/deps/svsm-0651abaef6a489bb" | I'm not sure the metadata hash can be hardcoded here. I think it can change based on compiler version and other parameters. |
svsm | github_2023 | others | 120 | coconut-svsm | roy-hopkins | @@ -2,22 +2,28 @@ FEATURES ?= "default"
CARGO_ARGS = --features ${FEATURES}
ifdef RELEASE
-TARGET_PATH="release"
+TARGET_PATH=release
CARGO_ARGS += --release
else
-TARGET_PATH="debug"
+TARGET_PATH=debug
endif
STAGE2_ELF = "target/x86_64-unknown-none/${TARGET_PATH}/stage2"
ifdef TEST
-KERNEL_ELF = "target/x86_64-unknown-none/${TARGET_PATH}/deps/svsm-0651abaef6a489bb"
+KERNEL_ELF = target/x86_64-unknown-none/${TARGET_PATH}/svsm-test
CARGO_ENV_KERNEL = LINK_TEST=1
CARGO_TOOLCHAIN_KERNEL = +nightly
CARGO_ARGS_KERNEL = --tests
+CARGO_VERB_KERNEL = test
+# Define a test runner that simply copies the binary to a static location for
+# further processing. We do this because there's no good way to statically know
+# the test binary location.
+CARGO_ARGS_KERNEL += --config 'target.x86_64-unknown-none.runner=["sh", "-c", "cp $$0 ${KERNEL_ELF}"]' | That's a great solution! |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.