repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
svsm
github_2023
others
390
coconut-svsm
stefano-garzarella
@@ -39,13 +39,21 @@ pub struct SevIdBlockBuilder { impl SevIdBlockBuilder { pub fn build(igvm: &IgvmFile, measure: &IgvmMeasure) -> Result<Self, Box<dyn Error>> { - let ld = measure.digest(); let compatibility_mask = get_compatibility_mask(igvm, IgvmPlatformType::SEV_SNP).ok_or( String::from("IGVM file is not compatible with the specified platform."), )?; let policy = get_policy(igvm, compatibility_mask) .ok_or(String::from("IGVM file does not contain a guest policy."))?; + let mut ld = [0u8; 48]; + let digest = measure.digest(); + if digest.len() != ld.len() { + return Err("Invalid digest value calculated for ID block".into()); + } + for (i, val) in measure.digest().iter().enumerate() { + ld[i] = *val; + }
What about using `copy_from_slice`? ```suggestion ld.copy_from_slice(&digest); ```
svsm
github_2023
others
390
coconut-svsm
stefano-garzarella
@@ -128,14 +129,17 @@ pub struct IgvmMeasure { show_progress: bool, check_kvm: bool, native_zero: bool, - digest: [u8; 48], + digest_snp: [u8; 48], + digest_es: Sha256, + digest: Vec<u8>,
I haven't looked in detail, but could we make it so that we only have `digest: Vec<u8>`?
svsm
github_2023
others
390
coconut-svsm
stefano-garzarella
@@ -39,13 +39,15 @@ pub struct SevIdBlockBuilder { impl SevIdBlockBuilder { pub fn build(igvm: &IgvmFile, measure: &IgvmMeasure) -> Result<Self, Box<dyn Error>> { - let ld = measure.digest(); let compatibility_mask = get_compatibility_mask(igvm, IgvmPlatformType::SEV_SNP).ok_or( String::from("IGVM file is not compatible with the specified platform."), )?; let policy = get_policy(igvm, compatibility_mask) .ok_or(String::from("IGVM file does not contain a guest policy."))?; + let mut ld = [0u8; 48]; + ld.copy_from_slice(measure.digest());
Should we put back the check on the length as you did in the previous version? copy_from_slice() should panic if the two slices have different sizes.
svsm
github_2023
others
356
coconut-svsm
deeglaze
@@ -10,19 +10,22 @@ pub enum SvsmPlatformType { Native = 0, Snp = 1, + Tdp = 2,
What's Tdp? Not meant to be Tdx?
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -24,6 +29,10 @@ pub struct CmdOptions { #[arg(short, long)] pub firmware: Option<String>, + /// Platform to generate IGVM file for
This makes it impossible to build a multi-platform IGVM file, as only one platform can be selected here. It would be much better to have --snp and --tdx as options that are not mutually exclusive, just like --native is already an optional platform target argument that creates multi-platform files. It would be reasonable to make every platform optional, and to generate an error message only if no platforms were selected.
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (C) 2024 Intel Corporation +// +// Author: Peter Fang <peter.fang@intel.com> + +use crate::address::{PhysAddr, VirtAddr}; +use crate::cpu::percpu::PerCpu; +use crate::error::SvsmError; +use crate::io::IOPort; +use crate::platform::{PageEncryptionMasks, PageStateChangeOp, SvsmPlatform}; +use crate::sev::PvalidateOp; +use crate::svsm_console::SVSMIOPort; +use crate::types::PageSize; +use crate::utils::MemoryRegion; + +static CONSOLE_IO: SVSMIOPort = SVSMIOPort::new(); + +#[derive(Clone, Copy, Debug)] +pub struct TdpPlatform {} + +impl TdpPlatform { + pub fn new() -> Self { + Self {} + } +} + +impl Default for TdpPlatform { + fn default() -> Self { + Self::new() + } +} + +impl SvsmPlatform for TdpPlatform { + fn env_setup(&mut self) {} + + fn env_setup_late(&mut self) {} + + fn setup_percpu(&self, _cpu: &mut PerCpu) -> Result<(), SvsmError> { + Err(SvsmError::Tdx) + } + + fn setup_percpu_current(&self, _cpu: &mut PerCpu) -> Result<(), SvsmError> { + Err(SvsmError::Tdx) + } + + fn get_page_encryption_masks(&self, _vtom: usize) -> PageEncryptionMasks { + PageEncryptionMasks { + private_pte_mask: 0, + shared_pte_mask: 0,
Shouldn't this include the shared GPA bit 47? I understand this code isn't actually used for anything yet, but we might as well get the obvious stuff right from the beginning.
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -62,9 +61,9 @@ impl IgvmBuilder { // revision. let mut use_igvm_v2 = false; - // SNP is always included in the compatibility mask regardless of the + // SNP/TDP is always included in the compatibility mask regardless of the
I think we'd like to avoid forcing the inclusion of any specific platform. I think a better approach would be to default to no platforms and require specific command-line options for every platform that should be included. The IGVM builder should generate an error if no platforms are selected. Then the Makefile can include both SNP and TDP as a default build target. This at least leaves open the possibility of building single-platform files in workflows that require it.
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -36,16 +36,30 @@ else BUILD_FW = endif +PLATFORM ?= sev-snp +ifeq ($(PLATFORM), tdp) +BUILD_PLATFORM = --stage1 bin/stage1-trampoline.bin
Since your change intends to include TDP by default in the generated IGVM file, then shouldn't this also be built by default without requiring a specific `PLATFORM=` argument?
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -253,6 +244,17 @@ impl IgvmBuilder { }, )); } + if COMPATIBILITY_MASK.contains(TDP_COMPATIBILITY_MASK) { + self.platforms.push(IgvmPlatformHeader::SupportedPlatform( + IGVM_VHS_SUPPORTED_PLATFORM { + compatibility_mask: TDP_COMPATIBILITY_MASK, + highest_vtl: 2, + platform_type: IgvmPlatformType::TDX, + platform_version: 1, + shared_gpa_boundary: param_block.vtom,
The value for `shared_gpa_boundary` must be architecturally valid for TDX, but you're just copying the SNP value of 1<<46. I think you will need to design the file to have two different parameter blocks, one for SNP and one for TDX, which have different VTOM values. Alternatively, you could decide that the VTOM parameter is completely ignored on TDX, and you could simply set the platform's shared GPA boundary to be 1<<47.
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -68,16 +82,16 @@ $(IGVMBUILDER): $(IGVMMEASURE): cargo build ${CARGO_ARGS} --target=x86_64-unknown-linux-gnu -p igvmmeasure -bin/coconut-qemu.igvm: $(IGVMBUILDER) $(IGVMMEASURE) bin/svsm-kernel.elf bin/stage2.bin ${FS_BIN} - $(IGVMBUILDER) --sort --policy 0x30000 --output $@ --stage2 bin/stage2.bin --kernel bin/svsm-kernel.elf --filesystem ${FS_BIN} ${BUILD_FW} qemu +bin/coconut-qemu.igvm: $(IGVMBUILDER) $(IGVMMEASURE) $(IGVM_QEMU_BINS) + $(IGVMBUILDER) --sort --policy 0x30000 --output $@ --stage2 bin/stage2.bin --kernel bin/svsm-kernel.elf --filesystem ${FS_BIN} ${BUILD_FW} ${BUILD_PLATFORM} qemu $(IGVMMEASURE) --check-kvm $@ measure -bin/coconut-hyperv.igvm: $(IGVMBUILDER) $(IGVMMEASURE) bin/svsm-kernel.elf bin/stage2.bin +bin/coconut-hyperv.igvm: $(IGVMBUILDER) $(IGVMMEASURE) $(IGVM_HYPERV_BINS) $(IGVMBUILDER) --sort --output $@ --stage2 bin/stage2.bin --kernel bin/svsm-kernel.elf --comport 3 hyper-v --native
Stage 1 needs to be included in the Hyper-V command line as well as the QEMU command line.
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -391,6 +393,11 @@ impl IgvmBuilder { )?; } + // Add optional stage 1 binary. + if let Some(stage1) = &self.options.stage1 { + self.add_data_pages_from_file(&stage1.clone(), self.gpa_map.stage1_image.get_start())?;
The stage 1 image must only be included for TDP platforms, not for SNP platforms. Since the desire is to create a multi-platform IGVM file, the stage 1 TDP image will be included on the command line even if SNP is available as a target platform. But including stage 1 for SNP will cause the IGVM file to fail to load, because the loader may not treat the reset page as valid memory for SNP VMs. Therefore, the compatibility mask for the page data for stage 1 must be set to the TDP compatibility mask only.
svsm
github_2023
others
356
coconut-svsm
msft-jlange
@@ -8,6 +8,10 @@ use clap::{Parser, ValueEnum}; #[derive(Parser, Debug)] pub struct CmdOptions { + /// Optional stage 1 binary file + #[arg(long)] + pub stage1: Option<String>,
Now that this file has been made specific to TDX, I wonder if we should rename this parameter to `--tdx-stage1` or something similar. I can envision that in the future, there may be another platform that requires use of a stage 1, which must be a different binary than the TDX stage 1, and we don't want any collision to arise.
svsm
github_2023
others
356
coconut-svsm
stefano-garzarella
@@ -32,8 +32,9 @@ use crate::stage2_stack::Stage2Stack; use crate::vmsa::{construct_start_context, construct_vmsa}; use crate::GpaMap; -pub const SNP_COMPATIBILITY_MASK: u32 = 1; -pub const NATIVE_COMPATIBILITY_MASK: u32 = 2; +pub const SNP_COMPATIBILITY_MASK: u32 = 1u32 << 0; +pub const NATIVE_COMPATIBILITY_MASK: u32 = 1u32 << 1;
nit: I would have made these changes in a separate patch
svsm
github_2023
others
356
coconut-svsm
stefano-garzarella
@@ -158,7 +168,7 @@ clippy: clean: cargo clean rm -f stage1/*.o stage1/*.bin stage1/*.elf - rm -f ${STAGE1_OBJS} utils/gen_meta utils/print-meta + rm -f utils/gen_meta utils/print-meta
Why we don't need to remove `${STAGE1_OBJS}` anymore?
svsm
github_2023
others
387
coconut-svsm
msft-jlange
@@ -108,31 +107,6 @@ impl PerCpuAreas { } } -#[derive(Copy, Clone, Debug, Default)] -pub struct VmsaRef {
I'm a little reluctant to lose all of the information associated with the VMSA. Today, the `guest_owned` field is not used, and the `paddr` is only needed in a couple of specific places where the VMSA is created or bound. However, in the future, when we support a paravisor mode in addition to the SVSM mode, it will be possible for VMSAs to come both from guest-owned memory and from SVSM-owned memory, and I suspect we will start introducing code that needs to be able to tell the difference. It's a little harder for me to envision a need for tracking the physical address, since I think it will probably only be needed in the same places that the physical address is used today (for which you have an answer) but if we need to keep a tuple around anyway, then retaining the physical address is a good idea. We can always remove it if it is not needed when the code base becomes more complete, and that would be a lot easier than removing it now and then discovering later that we need to add it back.
svsm
github_2023
others
387
coconut-svsm
msft-jlange
@@ -311,13 +285,20 @@ impl PerCpuShared { const _: () = assert!(size_of::<PerCpu>() <= PAGE_SIZE); +/// CPU-local data. +/// +/// This type is not [`Sync`], as its contents will only be accessed from the +/// local CPU, much like thread-local data in an std environment. The only +/// part of the struct that may be accessed from a different CPU is the +/// `shared` field, a reference to which will be stored in [`PERCPU_AREAS`]. #[derive(Debug)] pub struct PerCpu { + /// Per-CPU storage that might be accessed from other CPUs. shared: PerCpuShared, pgtbl: RefCell<PageTableRef>, tss: Cell<X86Tss>, - svsm_vmsa: Cell<Option<VmsaRef>>, + svsm_vmsa: Cell<Option<&'static VMSA>>,
Once the SVSM VMSA has been allocated and initialized, it can never be changed again. Perhaps `OnceCell` would be more appropriate here to eliminate the possibility that any code tries to change the VMSA. It is also the case that once the VMSA has been initialized and communicated to the host, it can never even be accessed again by the SVSM, so I'm not sure the `PerCpu` structure even needs to track the VMSA allocation at all. We need to make sure that when the VMSA page is allocated, it is never freed - so we want to effectively leak the allocation after it has been made, but it doesn't have to be tracked after that point.
svsm
github_2023
others
388
coconut-svsm
roy-hopkins
@@ -236,7 +236,16 @@ $ make test Unit tests can be run inside the SVSM by ``` -$ QEMU=/path/to/qemu OVMF=/path/to/firmware/ make test-in-svsm +$ QEMU=/path/to/qemu make test-in-svsm +``` + +Note: to compile the test kernel used for unit tests, we use the nightly +toolchain, so if the test kernel build fails, try installing the +`x86_64-unknown-none` target for the nighlty toolchain via your distro or
Typo: `nighlty`.
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -734,4 +734,78 @@ impl LocalApic { } } } + + fn handoff_to_host(&mut self) { + let hv_doorbell = this_cpu().hv_doorbell().unwrap(); + let descriptor = &hv_doorbell.per_vmpl[GUEST_VMPL - 1]; + // Establish the IRR as holding multiple vectors regardless of the + // number of active vectors, as this makes transferring IRR state + // simpler. + let multiple_vectors_mask: u32 = HVExtIntStatus::new().with_multiple_vectors(true).into(); + descriptor + .status + .fetch_or(multiple_vectors_mask, Ordering::Relaxed); + + // If a single, edge-triggered interrupt is present in the interrupt + // descriptor, then transfer it to the local IRR. Level-sensitive + // interrupts can be left alone since the host must be prepared to + // consume those directly. Note that consuming the interrupt does not + // require zeroing the vector, since the host is supposed to ignore the + // vector field when multiple vectors are present (except for the case + // of level-sensitive interrupts). + let flags = HVExtIntStatus::from(descriptor.status.load(Ordering::Relaxed)); + if flags.pending_vector() >= 31 && !flags.level_sensitive() { + Self::insert_vector_register(&mut self.irr, flags.pending_vector()); + } + + // Copy vector 31 if required, and then insert all of the additional + // IRR fields into the host IRR. + if self.irr[0] & 0x8000 != 0 {
Should self.irr[0] be `anded` with `0x8000 0000` instead of `0x8000` to check for vector 31? While at vector 31, can you please clarify its usage? Both AMD and Intel spec state vector 31 as reserved.
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -21,13 +21,40 @@ pub struct HVDoorbellFlags { pub no_further_signal: bool, } +#[bitfield(u32)] +pub struct HVExtIntStatus { + pub pending_vector: u8, + pub nmi_pending: bool, + pub mc_pending: bool, + pub level_sensitive: bool, + #[bits(3)] + rsvd_13_11: u32, + pub multiple_vectors: bool, + #[bits(12)] + rsvd_26_15: u32, + ipi_requested: bool, + #[bits(3)] + rsvd_30_28: u32, + pub vector_31: bool, +} + +#[repr(C)] +#[derive(Debug)] +pub struct HVExtIntInfo { + pub status: AtomicU32, + pub irr: [AtomicU32; 7], + pub isr: [AtomicU32; 8],
From the alternate injection spec, it looks like extended interrupt descriptor for VMPL1 is between `64-95` bytes and only contains IRR info. Is there a newer version of spec where bytes `96-127 ` represent ISR info?
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -21,13 +21,40 @@ pub struct HVDoorbellFlags { pub no_further_signal: bool, } +#[bitfield(u32)] +pub struct HVExtIntStatus { + pub pending_vector: u8, + pub nmi_pending: bool, + pub mc_pending: bool, + pub level_sensitive: bool, + #[bits(3)] + rsvd_13_11: u32, + pub multiple_vectors: bool, + #[bits(12)] + rsvd_26_15: u32, + ipi_requested: bool, + #[bits(3)] + rsvd_30_28: u32, + pub vector_31: bool, +} + +#[repr(C)] +#[derive(Debug)] +pub struct HVExtIntInfo { + pub status: AtomicU32, + pub irr: [AtomicU32; 7], + pub isr: [AtomicU32; 8], +} + #[repr(C)] #[derive(Debug)] pub struct HVDoorbell { pub vector: AtomicU8, pub flags: AtomicU8, pub no_eoi_required: AtomicU8, - reserved: u8, + pub per_vmpl_events: AtomicU8, + reserved_31_4: [u8; 60],
should `reserved_31_4` -> `reserved_63_4`?
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -0,0 +1,166 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::platform::guest_cpu::GuestCpuState; + +#[derive(Clone, Copy, Debug, Default)] +pub struct LocalApic { + irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. + for i in (0..7).rev() { + if self.irr[i] != 0 { + let bit_index = 31 - self.irr[i].leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_irr(&mut self, irq: u8) { + self.irr[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_irr(&mut self, irq: u8) { + self.irr[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + assert!(self.isr_stack_index != 0); + assert!(self.isr_stack[self.isr_stack_index - 1] == irq); + self.insert_irr(irq); + self.isr_stack_index -= 1; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>(&mut self, cpu_state: &mut T) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered {
Sorry if I overlooked but is `interrupt_delivered` is set anywhere? I only see it being initialized to `false`.
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -306,12 +317,14 @@ impl LocalApic { // Mark this interrupt in-service. It will be recalled if // the ISR is examined again before the interrupt is actually // delivered. - self.remove_irr(irq); + Self::remove_vector_register(&mut self.irr, irq); self.isr_stack[self.isr_stack_index] = irq; self.isr_stack_index += 1; - // Configure a lazy EOI if possible. - if try_lazy_eoi { + // Configure a lazy EOI if possible. Lazy EOI is not possible + // for level-sensitive interrupts, because an explicit EOI + // is required to acknowledge the interrupt at the source. + if try_lazy_eoi && Self::test_vector_register(&self.tmr, irq) {
Should it should be `!`Self::test_vector_register(&self.tmr, irq) right? As lazy EOI is not possible for level-sensitive interrupts.
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -227,17 +279,47 @@ impl LocalApic { if (irq & 0xF0) > (current_priority & 0xF0) { // Determine whether this interrupt can be injected // immediately. If not, queue it for delivery when possible. - if !self.deliver_interrupt_immediately(irq, cpu_state) { + let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) { + // Use of lazy EOI can safely be attempted, because the + // highest priority interrupt in service is unambiguous. + true + } else { cpu_state.queue_interrupt(irq); self.interrupt_queued = true; - } + + // A lazy EOI can only be attempted if there is no lower + // priority interrupt in service. If a lower priority + // interrupt is in service, then the lazy EOI handler + // won't know whether the lazy EOI is for the one that + // is already in service or the one that is being queued + // here. + self.isr_stack_index == 0 + }; // Mark this interrupt in-service. It will be recalled if // the ISR is examined again before the interrupt is actually // delivered. self.remove_irr(irq); self.isr_stack[self.isr_stack_index] = irq; self.isr_stack_index += 1; + + // Configure a lazy EOI if possible. + if try_lazy_eoi { + // A lazy EOI is possible only if there is no other + // interrupt pending. If another interrupt is pending, + // then an explicit EOI will be required to prompt + // delivery of the next interrupt. + if self.scan_irr() == 0 { + self.lazy_eoi_pending = true;
Should this setting ` self.lazy_eoi_pending = true` be removed as it is set again after checking from calling area? Also, comment may need to be updated?
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -117,6 +129,76 @@ impl SvsmPlatform for SnpPlatform { pvalidate_range(region, PvalidateOp::Invalid) } + fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError> { + // If alternate injection was requested, then it must be supported by + // the hypervisor. + if alt_inj_requested + && !hypervisor_ghcb_features().contains(GHCBHvFeatures::SEV_SNP_EXT_INTERRUPTS) + { + return Err(NotSupported); + } + + self.use_alternate_injection = alt_inj_requested; + Ok(()) + } + + fn use_alternate_injection(&self) -> bool { + self.use_alternate_injection + } + + fn lock_unlock_apic_emulation(&self, lock: bool) -> Result<(), SvsmError> { + // The lock state can only be changed if APIC emulation has not already + // been disabled on any CPU. + let new_state = if lock { + APIC_EMULATION_LOCKED + } else { + APIC_EMULATION_ENABLED + }; + let mut current = APIC_EMULATION_STATE.load(Ordering::Relaxed); + loop { + if current == APIC_EMULATION_DISABLED { + return Err(SvsmError::Apic); + } + match APIC_EMULATION_STATE.compare_exchange_weak( + current, + new_state, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(val) => current = val, + } + } + + Ok(()) + } + + fn disable_apic_emulation(&self) -> Result<(), SvsmError> { + // APIC emulation can only be disabled if it is not locked. + let mut current = APIC_EMULATION_STATE.load(Ordering::Relaxed); + loop { + if current == APIC_EMULATION_LOCKED { + return Err(SvsmError::Apic); + } + match APIC_EMULATION_STATE.compare_exchange_weak( + current, + APIC_EMULATION_DISABLED, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(val) => current = val, + } + } + + Ok(()) + } + + fn post_irq(&self, icr: u64) -> Result<(), SvsmError> { + current_ghcb().hv_ipi(icr)?; + Ok(())
```suggestion current_ghcb().hv_ipi(icr) ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -117,6 +129,76 @@ impl SvsmPlatform for SnpPlatform { pvalidate_range(region, PvalidateOp::Invalid) } + fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError> { + // If alternate injection was requested, then it must be supported by + // the hypervisor. + if alt_inj_requested + && !hypervisor_ghcb_features().contains(GHCBHvFeatures::SEV_SNP_EXT_INTERRUPTS) + { + return Err(NotSupported);
`SvsmError` is already imported, so simply: ```suggestion return Err(SvsmError::NotSupported); ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +}
We can simply add `repr(u64)` and implement these conversions with the `From` trait. ```suggestion #[repr(u64)] #[derive(Debug, PartialEq)] enum IcrDestFmt { Dest = 0, OnlySelf = 1, AllWithSelf = 2, AllButSelf = 3, } impl From<IcrDestFmt> for u64 { fn from(val: IcrDestFmt) -> Self { val as _ } } impl From<u64> for IcrDestFmt { fn from(value: u64) -> Self { match value { 3 => Self::AllButSelf, 2 => Self::AllWithSelf, 1 => Self::OnlySelf, _ => Self::Dest, } } } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +}
Same thing here, use `repr(u64)` and `From`
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + 
pub destination_mode: bool, + pub delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. + for i in (0..7).rev() { + if self.irr[i] != 0 { + let bit_index = 31 - self.irr[i].leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + }
```suggestion /// Scan to find the highest pending IRR vector. fn scan_irr(&self) -> u8 { for (i, irr) in self.irr.into_iter().enumerate().rev() { if irr != 0 { let bit_index = 31 - irr.leading_zeros(); let vector = (i as u32) * 32 + bit_index; return vector.try_into().unwrap(); } } 0 } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + 
pub destination_mode: bool, + pub delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. + for i in (0..7).rev() { + if self.irr[i] != 0 { + let bit_index = 31 - self.irr[i].leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + assert!(self.isr_stack_index != 0); + assert!(self.isr_stack[self.isr_stack_index - 1] == irq); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index -= 1; + self.update_required = true;
Using `checked_sub()` (and using a safe array accessor) is probably more readable: ```suggestion let stack_index = self.isr_stack_index.checked_sub(1).unwrap(); assert!(self.isr_stack.get(stack_index) == Some(irq)); Self::insert_vector_register(&mut self.irr, irq); self.isr_stack_index = stack_index; self.update_required = true; ``` Although I'd suggest returning an error.
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + 
pub destination_mode: bool, + pub delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for i in (0..7).rev() { + if self.irr[i] != 0 { + let bit_index = 31 - self.irr[i].leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + assert!(self.isr_stack_index != 0); + assert!(self.isr_stack[self.isr_stack_index - 1] == irq); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index -= 1; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error + // occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } else { + None + }
If `caa_addr` is None you can just return early with `?`: ```suggestion let virt_addr = caa_addr?; let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); // Ignore errors here, since nothing can be done if an error occurs. if let Ok(caa) = calling_area.read() { let _ = calling_area.write(caa.update_no_eoi_required(0)); } Some(calling_area) ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + 
pub destination_mode: bool, + pub delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for i in (0..7).rev() { + if self.irr[i] != 0 { + let bit_index = 31 - self.irr[i].leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + assert!(self.isr_stack_index != 0); + assert!(self.isr_stack[self.isr_stack_index - 1] == irq); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index -= 1; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error + // occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } else { + None + } + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector an transfer any pending IPIs into the local + // IRR vector. + for i in 0..8 { + self.irr[i] |= cpu_shared.ipi_irr_vector(i); + }
```suggestion for (i, irr) in self.irr.iter_mut().enumerate() { *irr |= cpu_shared.ipi_irr_vector(i); } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,851 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::ghcb::current_ghcb; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{get_current_apic_id, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + 
pub destination_mode: bool, + pub delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for i in (0..7).rev() { + if self.irr[i] != 0 { + let bit_index = 31 - self.irr[i].leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + assert!(self.isr_stack_index != 0); + assert!(self.isr_stack[self.isr_stack_index - 1] == irq); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index -= 1; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error + // occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } else { + None + } + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector an transfer any pending IPIs into the local + // IRR vector. + for i in 0..8 { + self.irr[i] |= cpu_shared.ipi_irr_vector(i); + } + if cpu_shared.nmi_pending() { + self.nmi_pending = true; + } + self.update_required = true; + } + + pub fn present_interrupts<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Make sure any interrupts being presented by the host have been + // consumed. + self.consume_host_interrupts(); + + // Consume any pending IPIs. + if cpu_shared.ipi_pending() { + self.consume_pending_ipis(cpu_shared); + } + + if self.update_required { + // Make sure that all previously delivered interrupts have been + // processed before attempting to process any more. + self.check_delivered_interrupts(cpu_state, caa_addr); + self.update_required = false; + + // If an NMI is pending, then present it first. + if self.nmi_pending { + cpu_state.request_nmi(); + self.nmi_pending = false; + } + + let irq = self.scan_irr(); + let current_priority = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index - 1] + } else { + 0 + }; + + // Assume no lazy EOI can be attempted unless it is recalculated + // below. + self.lazy_eoi_pending = false; + let guest_caa = Self::clear_guest_eoi_pending(caa_addr); + + // This interrupt is a candidate for delivery only if its priority + // exceeds the priority of the highest priority interrupt currently + // in service. This check does not consider TPR, because an + // interrupt lower in priority than TPR must be queued for delivery + // as soon as TPR is lowered. + if (irq & 0xF0) > (current_priority & 0xF0) { + // Determine whether this interrupt can be injected + // immediately. 
If not, queue it for delivery when possible. + let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) { + // Use of lazy EOI can safely be attempted, because the + // highest priority interrupt in service is unambiguous. + true + } else { + cpu_state.queue_interrupt(irq); + self.interrupt_queued = true; + + // A lazy EOI can only be attempted if there is no lower + // priority interrupt in service. If a lower priority + // interrupt is in service, then the lazy EOI handler + // won't know whether the lazy EOI is for the one that + // is already in service or the one that is being queued + // here. + self.isr_stack_index == 0 + }; + + // Mark this interrupt in-service. It will be recalled if + // the ISR is examined again before the interrupt is actually + // delivered. + Self::remove_vector_register(&mut self.irr, irq); + self.isr_stack[self.isr_stack_index] = irq; + self.isr_stack_index += 1; + + // Configure a lazy EOI if possible. Lazy EOI is not possible + // for level-sensitive interrupts, because an explicit EOI + // is required to acknowledge the interrupt at the source. + if try_lazy_eoi && Self::test_vector_register(&self.tmr, irq) { + // A lazy EOI is possible only if there is no other + // interrupt pending. If another interrupt is pending, + // then an explicit EOI will be required to prompt + // delivery of the next interrupt. + if self.scan_irr() == 0 { + self.lazy_eoi_pending = true; + if let Some(calling_area) = guest_caa { + if let Ok(caa) = calling_area.read() { + if calling_area.write(caa.update_no_eoi_required(1)).is_ok() { + self.lazy_eoi_pending = true; + } + } + } + } + } + } + } + } + + fn perform_host_eoi(vector: u8) { + // Errors from the host are not expected and cannot be meaningfully + // handled, so simply ignore them. 
+ let _r = current_ghcb().specific_eoi(vector, GUEST_VMPL.try_into().unwrap()); + assert!(_r.is_ok()); + } + + pub fn perform_eoi(&mut self) { + // Pop any in-service interrupt from the stack, and schedule the APIC + // for reevaluation. + if self.isr_stack_index != 0 { + self.isr_stack_index -= 1; + let vector = self.isr_stack[self.isr_stack_index]; + if Self::test_vector_register(&self.tmr, vector) { + if Self::test_vector_register(&self.host_tmr, vector) { + Self::perform_host_eoi(vector); + Self::remove_vector_register(&mut self.host_tmr, vector); + } else { + // FIXME: should do something with locally generated + // level-sensitive interrupts. + } + Self::remove_vector_register(&mut self.tmr, vector); + } + self.update_required = true; + self.lazy_eoi_pending = false; + } + } + + fn get_isr(&self, index: usize) -> u32 { + let mut value = 0; + for i in 0..self.isr_stack_index { + if (usize::from(self.isr_stack[i] >> 5)) == index { + value |= 1 << (self.isr_stack[i] & 0x1F) + } + }
```suggestion for irq in self.isr_stack.into_iter().take(self.isr_stack_index) { if (usize::from(irq >> 5)) == index { value |= 1 << (irq & 0x1F) } } ```
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -4,12 +4,98 @@ // // Author: Jon Lange (jlange@microsoft.com) +use crate::cpu::percpu::PerCpuShared; use crate::platform::guest_cpu::GuestCpuState; -#[derive(Clone, Copy, Debug, Default)] +use bitfield_struct::bitfield; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + pub destination_mode: bool, + pub delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize,
Aren't bits `16-17 (remote_read_status)` reserved?
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -393,6 +410,128 @@ impl LocalApic { self.update_required = true; } + fn send_logical_ipi(&mut self, icr: ApicIcr) -> bool { + let vector = icr.vector(); + let mut signal = false; + + // Check whether the current CPU matches the destination. + let destination = icr.destination(); + let apic_id = get_current_apic_id(); + if Self::logical_destination_match(destination, apic_id) { + self.post_interrupt(vector, false);
If current cpu matches the logical destination, can the code return instead of checking other cpus?
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -449,4 +457,102 @@ impl LocalApic { self.allowed_irr[index] &= !mask; } } + + fn signal_one_host_interrupt(&mut self, vector: u8) { + let index = (vector >> 5) as usize; + let mask = 1 << (vector & 31);
Nit: Not sure if this is an issue but for uniformity in the patch it may be better to `1` -> `1u32`?
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -165,4 +251,117 @@ impl LocalApic { self.update_required = true; } } + + fn get_isr(&self, index: usize) -> u32 { + let mut value = 0; + for i in 0..self.isr_stack_index { + if (usize::from(self.isr_stack[i] >> 5)) == index { + value |= 1 << (self.isr_stack[i] & 0x1F) + } + } + value + } + + fn post_interrupt(&mut self, irq: u8) { + // Set the appropriate bit in the IRR. Once set, signal that interrupt + // processing is required before returning to the guest. + self.insert_irr(irq); + self.update_required = true; + } + + pub fn read_register<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + register: u64, + ) -> Result<u64, ApicError> { + // Rewind any undelivered interrupt so it is reflected in any register + // read. + self.check_delivered_interrupts(cpu_state); + + match register { + APIC_REGISTER_APIC_ID => Ok(u64::from(cpu_shared.apic_id())), + APIC_REGISTER_IRR_0..=APIC_REGISTER_IRR_7 => { + let offset = register - APIC_REGISTER_IRR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.irr[index] as u64) + } + APIC_REGISTER_ISR_0..=APIC_REGISTER_ISR_7 => { + let offset = register - APIC_REGISTER_IRR_0; + Ok(self.get_isr(offset.try_into().unwrap()) as u64) + } + APIC_REGISTER_TPR => Ok(cpu_state.get_tpr() as u64), + APIC_REGISTER_PPR => Ok(self.get_ppr(cpu_state) as u64), + _ => Err(ApicError::ApicError), + } + } + + fn handle_icr_write(&mut self, value: u64) -> Result<(), ApicError> { + let icr = ApicIcr::from(value); + + // Only fixed interrupts can be handled. + if icr.message_type() != IcrMessageType::Fixed { + return Err(ApicError::ApicError); + } + + // Only asserted edge-triggered interrupts can be handled. + if icr.trigger_mode() || !icr.assert() { + return Err(ApicError::ApicError); + } + + // FIXME - support destinations other than self. 
+ if icr.destination_shorthand() != IcrDestFmt::OnlySelf { + return Err(ApicError::ApicError); + } + + self.post_interrupt(icr.vector()); + + Ok(()) + } + + pub fn write_register<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + register: u64, + value: u64, + ) -> Result<(), ApicError> { + // Rewind any undelivered interrupt so it is correctly processed by + // any register write. + self.check_delivered_interrupts(cpu_state); + + match register { + APIC_REGISTER_TPR => { + // TPR must be an 8-bit value. + if value > 0xFF { + Err(ApicError::ApicError) + } else { + cpu_state.set_tpr((value & 0xFF) as u8); + Ok(()) + } + } + APIC_REGISTER_EOI => { + self.perform_eoi(); + Ok(()) + } + APIC_REGISTER_ICR => self.handle_icr_write(value), + APIC_REGISTER_SELF_IPI => { + if value > 0xFF { + Err(ApicError::ApicError) + } else { + self.post_interrupt((value & 0xFF) as u8); + Ok(()) + } + } + _ => Err(ApicError::ApicError), + } + } + pub fn configure_vector(&mut self, vector: u8, allowed: bool) { + let index = (vector >> 5) as usize; + let mask = 1 << (vector & 31);
Nit: `1` -> `1u32`?
svsm
github_2023
others
368
coconut-svsm
vijaydhanraj
@@ -167,4 +253,117 @@ impl LocalApic { self.update_required = true; } } + + fn get_isr(&self, index: usize) -> u32 { + let mut value = 0; + for isr in self.isr_stack.into_iter().take(self.isr_stack_index) { + if (usize::from(isr >> 5)) == index { + value |= 1 << (isr & 0x1F) + } + } + value + } + + fn post_interrupt(&mut self, irq: u8) { + // Set the appropriate bit in the IRR. Once set, signal that interrupt + // processing is required before returning to the guest. + self.insert_irr(irq); + self.update_required = true; + } + + pub fn read_register<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + register: u64, + ) -> Result<u64, ApicError> { + // Rewind any undelivered interrupt so it is reflected in any register + // read. + self.check_delivered_interrupts(cpu_state); + + match register { + APIC_REGISTER_APIC_ID => Ok(u64::from(cpu_shared.apic_id())), + APIC_REGISTER_IRR_0..=APIC_REGISTER_IRR_7 => { + let offset = register - APIC_REGISTER_IRR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.irr[index] as u64) + } + APIC_REGISTER_ISR_0..=APIC_REGISTER_ISR_7 => { + let offset = register - APIC_REGISTER_IRR_0;
I think this might be a typo, `APIC_REGISTER_IRR_0` -> `APIC_REGISTER_ISR_0`?
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -221,6 +221,10 @@ impl IgvmBuilder { kernel_size: self.gpa_map.kernel.get_size() as u32, kernel_base: self.gpa_map.kernel.get_start(), vtom, + use_alternate_injection: match self.options.alt_injection { + true => 1, + false => 0, + },
[`u8` implements `From<bool>`](https://doc.rust-lang.org/std/primitive.u8.html#impl-From%3Cbool%3E-for-u8) ```suggestion use_alternate_injection: u8::from(self.options.alt_injection), ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::cpu::percpu::this_cpu; +use crate::platform::SVSM_PLATFORM; +use crate::protocols::errors::SvsmReqError; +use crate::protocols::RequestParams; + +const SVSM_REQ_APIC_QUERY_FEATURES: u32 = 0; +const SVSM_REQ_APIC_CONFIGURE: u32 = 1; +const SVSM_REQ_APIC_READ_REGISTER: u32 = 2; +const SVSM_REQ_APIC_WRITE_REGISTER: u32 = 3; +const SVSM_REQ_APIC_CONFIGURE_VECTOR: u32 = 4; + +const SVSM_APIC_CONFIGURE_DISABLED: u64 = 0; +const SVSM_APIC_CONFIGURE_ENABLED: u64 = 1; +const SVSM_APIC_CONFIGURE_LOCKED: u64 = 2; + +pub const APIC_PROTOCOL: u32 = 3; +pub const APIC_PROTOCOL_VERSION_MIN: u32 = 1; +pub const APIC_PROTOCOL_VERSION_MAX: u32 = 1; + +const SVSM_ERR_APIC_CANNOT_DISABLE: u64 = 0; +const SVSM_ERR_APIC_CANNOT_LOCK: u64 = 1; + +fn apic_query_features(params: &mut RequestParams) -> Result<(), SvsmReqError> { + // No features are supported beyond the base feature set. + params.rcx = 0; + Ok(()) +} + +fn apic_configure(params: &RequestParams) -> Result<(), SvsmReqError> { + match params.rcx { + SVSM_APIC_CONFIGURE_DISABLED => this_cpu() + .disable_apic_emulation() + .map_err(|_| SvsmReqError::protocol(SVSM_ERR_APIC_CANNOT_DISABLE)), + SVSM_APIC_CONFIGURE_ENABLED => { + // If this fails, the platform is known not to be in the locked + // state, so any error can be ignored in that case. + let _ = SVSM_PLATFORM.as_dyn_ref().lock_unlock_apic_emulation(false); + Ok(()) + } + SVSM_APIC_CONFIGURE_LOCKED => SVSM_PLATFORM + .as_dyn_ref() + .lock_unlock_apic_emulation(false) + .map_err(|_| SvsmReqError::protocol(SVSM_ERR_APIC_CANNOT_LOCK)), + _ => Err(SvsmReqError::invalid_parameter()), + } +} + +fn apic_read_register(params: &mut RequestParams) -> Result<(), SvsmReqError> { + let cpu = this_cpu(); + if !cpu.use_apic_emulation() { + return Err(SvsmReqError::invalid_request()); + }
We're already performing this check in `apic_protocol_request()`, I'd say it's redundant, since the function is not pub. Same thing with a few of the other APIC request handling functions.
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,850 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + pub destination_mode: bool, + pub 
delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for (i, irr) in self.irr.into_iter().enumerate().rev() { + if irr != 0 { + let bit_index = 31 - irr.leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + let new_index = self.isr_stack_index.checked_sub(1).unwrap(); + assert!(self.isr_stack.get(new_index) == Some(&irq)); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index = new_index; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + let virt_addr = caa_addr?; + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector and transfer any pending IPIs into the local + // IRR vector. + for (i, irr) in self.irr.iter_mut().enumerate() { + *irr |= cpu_shared.ipi_irr_vector(i); + } + if cpu_shared.nmi_pending() { + self.nmi_pending = true; + } + self.update_required = true; + } + + pub fn present_interrupts<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Make sure any interrupts being presented by the host have been + // consumed. + self.consume_host_interrupts(); + + // Consume any pending IPIs. + if cpu_shared.ipi_pending() { + self.consume_pending_ipis(cpu_shared); + } + + if self.update_required { + // Make sure that all previously delivered interrupts have been + // processed before attempting to process any more. + self.check_delivered_interrupts(cpu_state, caa_addr); + self.update_required = false; + + // If an NMI is pending, then present it first. + if self.nmi_pending { + cpu_state.request_nmi(); + self.nmi_pending = false; + } + + let irq = self.scan_irr(); + let current_priority = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index - 1] + } else { + 0 + }; + + // Assume no lazy EOI can be attempted unless it is recalculated + // below. + self.lazy_eoi_pending = false; + let guest_caa = Self::clear_guest_eoi_pending(caa_addr); + + // This interrupt is a candidate for delivery only if its priority + // exceeds the priority of the highest priority interrupt currently + // in service. This check does not consider TPR, because an + // interrupt lower in priority than TPR must be queued for delivery + // as soon as TPR is lowered. 
+ if (irq & 0xF0) > (current_priority & 0xF0) { + // Determine whether this interrupt can be injected + // immediately. If not, queue it for delivery when possible. + let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) { + self.interrupt_delivered = true; + + // Use of lazy EOI can safely be attempted, because the + // highest priority interrupt in service is unambiguous. + true + } else { + cpu_state.queue_interrupt(irq); + self.interrupt_queued = true; + + // A lazy EOI can only be attempted if there is no lower + // priority interrupt in service. If a lower priority + // interrupt is in service, then the lazy EOI handler + // won't know whether the lazy EOI is for the one that + // is already in service or the one that is being queued + // here. + self.isr_stack_index == 0 + }; + + // Mark this interrupt in-service. It will be recalled if + // the ISR is examined again before the interrupt is actually + // delivered. + Self::remove_vector_register(&mut self.irr, irq); + self.isr_stack[self.isr_stack_index] = irq; + self.isr_stack_index += 1; + + // Configure a lazy EOI if possible. Lazy EOI is not possible + // for level-sensitive interrupts, because an explicit EOI + // is required to acknowledge the interrupt at the source. + if try_lazy_eoi && !Self::test_vector_register(&self.tmr, irq) { + // A lazy EOI is possible only if there is no other + // interrupt pending. If another interrupt is pending, + // then an explicit EOI will be required to prompt + // delivery of the next interrupt. + if self.scan_irr() == 0 { + if let Some(calling_area) = guest_caa { + if let Ok(caa) = calling_area.read() { + if calling_area.write(caa.update_no_eoi_required(1)).is_ok() { + // Only track a pending lazy EOI if the + // calling area page could successfully be + // updated. 
+ self.lazy_eoi_pending = true; + } + } + } + } + } + } + } + } + + fn perform_host_eoi(vector: u8) { + // Errors from the host are not expected and cannot be meaningfully + // handled, so simply ignore them. + let _r = current_ghcb().specific_eoi(vector, GUEST_VMPL.try_into().unwrap()); + assert!(_r.is_ok()); + } + + pub fn perform_eoi(&mut self) { + // Pop any in-service interrupt from the stack, and schedule the APIC + // for reevaluation. + if self.isr_stack_index != 0 { + self.isr_stack_index -= 1; + let vector = self.isr_stack[self.isr_stack_index]; + if Self::test_vector_register(&self.tmr, vector) { + if Self::test_vector_register(&self.host_tmr, vector) { + Self::perform_host_eoi(vector); + Self::remove_vector_register(&mut self.host_tmr, vector); + } else { + // FIXME: should do something with locally generated + // level-sensitive interrupts. + } + Self::remove_vector_register(&mut self.tmr, vector); + } + self.update_required = true; + self.lazy_eoi_pending = false; + } + } + + fn get_isr(&self, index: usize) -> u32 { + let mut value = 0; + for isr in self.isr_stack.into_iter().take(self.isr_stack_index) { + if (usize::from(isr >> 5)) == index { + value |= 1 << (isr & 0x1F) + } + } + value + } + + fn post_interrupt(&mut self, irq: u8, level_sensitive: bool) { + // Set the appropriate bit in the IRR. Once set, signal that interrupt + // processing is required before returning to the guest. 
+ Self::insert_vector_register(&mut self.irr, irq); + if level_sensitive { + Self::insert_vector_register(&mut self.tmr, irq); + } + self.update_required = true; + } + + fn post_icr_interrupt(&mut self, icr: ApicIcr) { + if icr.message_type() == IcrMessageType::Nmi { + self.nmi_pending = true; + self.update_required = true; + } else { + self.post_interrupt(icr.vector(), false); + } + } + + fn post_ipi_one_target(cpu: &PerCpuShared, icr: ApicIcr) { + if icr.message_type() == IcrMessageType::Nmi { + cpu.request_nmi(); + } else { + cpu.request_ipi(icr.vector()); + } + } + + fn send_logical_ipi(&mut self, icr: ApicIcr) -> bool { + let mut signal = false; + + // Check whether the current CPU matches the destination. + let destination = icr.destination(); + let apic_id = this_cpu().get_apic_id(); + if Self::logical_destination_match(destination, apic_id) { + self.post_icr_interrupt(icr); + } + + // Enumerate all CPUs to see which have APIC IDs that match the + // requested destination. Skip the current CPU, since it was checked + // above. + for cpu_ref in PERCPU_AREAS.iter() { + let cpu = cpu_ref.unwrap(); + let this_apic_id = cpu.apic_id(); + if (this_apic_id != apic_id) + && Self::logical_destination_match(destination, this_apic_id) + { + Self::post_ipi_one_target(cpu, icr); + signal = true; + } + } + + signal + } + + fn logical_destination_match(destination: u32, apic_id: u32) -> bool { + // CHeck for a cluster match. + if (destination >> 16) != (apic_id >> 4) { + false + } else { + let bit = 1u32 << (apic_id & 0xF); + (destination & bit) != 0 + } + } + + fn send_physical_ipi(&mut self, icr: ApicIcr) -> bool { + // If the target APIC ID matches the current processor, then treat this + // as a self-IPI. Otherwise, locate the target processor by APIC ID. 
+ let destination = icr.destination(); + if destination == this_cpu().get_apic_id() { + self.post_interrupt(icr.vector(), false); + false + } else { + // If the target CPU cannot be located, then simply drop the + // request. + if let Some(cpu) = PERCPU_AREAS.get(destination) { + cpu.request_ipi(icr.vector()); + true + } else { + false + } + } + } + + fn send_ipi(&mut self, icr: ApicIcr) { + let (signal_host, include_others, include_self) = match icr.destination_shorthand() { + IcrDestFmt::Dest => { + if icr.destination() == 0xFFFF_FFFF { + // This is a broadcast, so treat it as all with self. + (true, true, true) + } else { + let signal_host = if icr.destination_mode() { + self.send_logical_ipi(icr) + } else { + self.send_physical_ipi(icr) + }; + + // Any possible self-IPI was handled above as part of + // delivery to the correct destination. + (signal_host, false, false) + } + } + IcrDestFmt::OnlySelf => (false, false, true), + IcrDestFmt::AllButSelf => (true, true, false), + IcrDestFmt::AllWithSelf => (true, true, true), + }; + + if include_others { + // Enumerate all processors in the system except for the + // current CPU and indicate that an IPI has been requested. + let apic_id = this_cpu().get_apic_id(); + for cpu_ref in PERCPU_AREAS.iter() { + let cpu = cpu_ref.unwrap(); + if cpu.apic_id() != apic_id { + Self::post_ipi_one_target(cpu, icr); + } + } + } + + if include_self { + self.post_icr_interrupt(icr); + } + + if signal_host { + // Calculate an ICR value to use for a host IPI request. This will + // be a fixed interrupt on the interrupt notification vector using + // the destination format specified in the ICR value. 
+ let mut hv_icr = ApicIcr::new() + .with_vector(INT_INJ_VECTOR as u8) + .with_message_type(IcrMessageType::Fixed) + .with_destination_mode(icr.destination_mode()) + .with_destination_shorthand(icr.destination_shorthand()) + .with_destination(icr.destination()); + + // Avoid a self interrupt if the target is all-including-self, + // because the self IPI was delivered above. In the case of + // a logical cluster IPI, it is impractical to avoid the self + // interrupt, but such cases should be rare. + if hv_icr.destination_shorthand() == IcrDestFmt::AllWithSelf { + hv_icr.set_destination_shorthand(IcrDestFmt::AllButSelf); + } + + let _r = SVSM_PLATFORM.as_dyn_ref().post_irq(hv_icr.into()); + assert!(_r.is_ok()); + } + } + + pub fn read_register<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + register: u64, + ) -> Result<u64, ApicError> { + // Rewind any undelivered interrupt so it is reflected in any register + // read. + self.check_delivered_interrupts(cpu_state, caa_addr); + + match register { + APIC_REGISTER_APIC_ID => Ok(u64::from(cpu_shared.apic_id())), + APIC_REGISTER_IRR_0..=APIC_REGISTER_IRR_7 => { + let offset = register - APIC_REGISTER_IRR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.irr[index] as u64) + } + APIC_REGISTER_ISR_0..=APIC_REGISTER_ISR_7 => { + let offset = register - APIC_REGISTER_ISR_0; + Ok(self.get_isr(offset.try_into().unwrap()) as u64) + } + APIC_REGISTER_TMR_0..=APIC_REGISTER_TMR_7 => { + let offset = register - APIC_REGISTER_TMR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.tmr[index] as u64) + } + APIC_REGISTER_TPR => Ok(cpu_state.get_tpr() as u64), + APIC_REGISTER_PPR => Ok(self.get_ppr(cpu_state) as u64), + _ => Err(ApicError::ApicError), + } + } + + fn handle_icr_write(&mut self, value: u64) -> Result<(), ApicError> { + let icr = ApicIcr::from(value); + + // Verify that this message type is supported. 
+ let valid_type = match icr.message_type() { + IcrMessageType::Fixed => { + // Only asserted edge-triggered interrupts can be handled. + !icr.trigger_mode() && icr.assert() + } + IcrMessageType::Nmi => true, + _ => false, + }; + + if !valid_type { + return Err(ApicError::ApicError); + } + + self.send_ipi(icr); + + Ok(()) + } + + pub fn write_register<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + register: u64, + value: u64, + ) -> Result<(), ApicError> { + // Rewind any undelivered interrupt so it is correctly processed by + // any register write. + self.check_delivered_interrupts(cpu_state, caa_addr); + + match register { + APIC_REGISTER_TPR => { + // TPR must be an 8-bit value. + if value > 0xFF { + Err(ApicError::ApicError) + } else { + cpu_state.set_tpr((value & 0xFF) as u8); + Ok(()) + }
```suggestion let tpr = u8::try_from(value).ok_or(ApicError::ApicError)?; cpu_state.set_tpr(tpr); Ok(()) ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,850 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + pub destination_mode: bool, + pub 
delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for (i, irr) in self.irr.into_iter().enumerate().rev() { + if irr != 0 { + let bit_index = 31 - irr.leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + let new_index = self.isr_stack_index.checked_sub(1).unwrap(); + assert!(self.isr_stack.get(new_index) == Some(&irq)); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index = new_index; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + let virt_addr = caa_addr?; + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector and transfer any pending IPIs into the local + // IRR vector. + for (i, irr) in self.irr.iter_mut().enumerate() { + *irr |= cpu_shared.ipi_irr_vector(i); + } + if cpu_shared.nmi_pending() { + self.nmi_pending = true; + } + self.update_required = true; + } + + pub fn present_interrupts<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Make sure any interrupts being presented by the host have been + // consumed. + self.consume_host_interrupts(); + + // Consume any pending IPIs. + if cpu_shared.ipi_pending() { + self.consume_pending_ipis(cpu_shared); + } + + if self.update_required { + // Make sure that all previously delivered interrupts have been + // processed before attempting to process any more. + self.check_delivered_interrupts(cpu_state, caa_addr); + self.update_required = false; + + // If an NMI is pending, then present it first. + if self.nmi_pending { + cpu_state.request_nmi(); + self.nmi_pending = false; + } + + let irq = self.scan_irr(); + let current_priority = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index - 1] + } else { + 0 + }; + + // Assume no lazy EOI can be attempted unless it is recalculated + // below. + self.lazy_eoi_pending = false; + let guest_caa = Self::clear_guest_eoi_pending(caa_addr); + + // This interrupt is a candidate for delivery only if its priority + // exceeds the priority of the highest priority interrupt currently + // in service. This check does not consider TPR, because an + // interrupt lower in priority than TPR must be queued for delivery + // as soon as TPR is lowered. 
+ if (irq & 0xF0) > (current_priority & 0xF0) { + // Determine whether this interrupt can be injected + // immediately. If not, queue it for delivery when possible. + let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) { + self.interrupt_delivered = true; + + // Use of lazy EOI can safely be attempted, because the + // highest priority interrupt in service is unambiguous. + true + } else { + cpu_state.queue_interrupt(irq); + self.interrupt_queued = true; + + // A lazy EOI can only be attempted if there is no lower + // priority interrupt in service. If a lower priority + // interrupt is in service, then the lazy EOI handler + // won't know whether the lazy EOI is for the one that + // is already in service or the one that is being queued + // here. + self.isr_stack_index == 0 + }; + + // Mark this interrupt in-service. It will be recalled if + // the ISR is examined again before the interrupt is actually + // delivered. + Self::remove_vector_register(&mut self.irr, irq); + self.isr_stack[self.isr_stack_index] = irq; + self.isr_stack_index += 1; + + // Configure a lazy EOI if possible. Lazy EOI is not possible + // for level-sensitive interrupts, because an explicit EOI + // is required to acknowledge the interrupt at the source. + if try_lazy_eoi && !Self::test_vector_register(&self.tmr, irq) { + // A lazy EOI is possible only if there is no other + // interrupt pending. If another interrupt is pending, + // then an explicit EOI will be required to prompt + // delivery of the next interrupt. + if self.scan_irr() == 0 { + if let Some(calling_area) = guest_caa { + if let Ok(caa) = calling_area.read() { + if calling_area.write(caa.update_no_eoi_required(1)).is_ok() { + // Only track a pending lazy EOI if the + // calling area page could successfully be + // updated. 
+ self.lazy_eoi_pending = true; + } + } + } + } + } + } + } + } + + fn perform_host_eoi(vector: u8) { + // Errors from the host are not expected and cannot be meaningfully + // handled, so simply ignore them. + let _r = current_ghcb().specific_eoi(vector, GUEST_VMPL.try_into().unwrap()); + assert!(_r.is_ok()); + } + + pub fn perform_eoi(&mut self) { + // Pop any in-service interrupt from the stack, and schedule the APIC + // for reevaluation. + if self.isr_stack_index != 0 { + self.isr_stack_index -= 1; + let vector = self.isr_stack[self.isr_stack_index]; + if Self::test_vector_register(&self.tmr, vector) { + if Self::test_vector_register(&self.host_tmr, vector) { + Self::perform_host_eoi(vector); + Self::remove_vector_register(&mut self.host_tmr, vector); + } else { + // FIXME: should do something with locally generated + // level-sensitive interrupts. + } + Self::remove_vector_register(&mut self.tmr, vector); + } + self.update_required = true; + self.lazy_eoi_pending = false; + } + } + + fn get_isr(&self, index: usize) -> u32 { + let mut value = 0; + for isr in self.isr_stack.into_iter().take(self.isr_stack_index) { + if (usize::from(isr >> 5)) == index { + value |= 1 << (isr & 0x1F) + } + } + value + } + + fn post_interrupt(&mut self, irq: u8, level_sensitive: bool) { + // Set the appropriate bit in the IRR. Once set, signal that interrupt + // processing is required before returning to the guest. 
+ Self::insert_vector_register(&mut self.irr, irq); + if level_sensitive { + Self::insert_vector_register(&mut self.tmr, irq); + } + self.update_required = true; + } + + fn post_icr_interrupt(&mut self, icr: ApicIcr) { + if icr.message_type() == IcrMessageType::Nmi { + self.nmi_pending = true; + self.update_required = true; + } else { + self.post_interrupt(icr.vector(), false); + } + } + + fn post_ipi_one_target(cpu: &PerCpuShared, icr: ApicIcr) { + if icr.message_type() == IcrMessageType::Nmi { + cpu.request_nmi(); + } else { + cpu.request_ipi(icr.vector()); + } + } + + fn send_logical_ipi(&mut self, icr: ApicIcr) -> bool { + let mut signal = false; + + // Check whether the current CPU matches the destination. + let destination = icr.destination(); + let apic_id = this_cpu().get_apic_id(); + if Self::logical_destination_match(destination, apic_id) { + self.post_icr_interrupt(icr); + } + + // Enumerate all CPUs to see which have APIC IDs that match the + // requested destination. Skip the current CPU, since it was checked + // above. + for cpu_ref in PERCPU_AREAS.iter() { + let cpu = cpu_ref.unwrap(); + let this_apic_id = cpu.apic_id(); + if (this_apic_id != apic_id) + && Self::logical_destination_match(destination, this_apic_id) + { + Self::post_ipi_one_target(cpu, icr); + signal = true; + } + } + + signal + } + + fn logical_destination_match(destination: u32, apic_id: u32) -> bool { + // CHeck for a cluster match. + if (destination >> 16) != (apic_id >> 4) { + false + } else { + let bit = 1u32 << (apic_id & 0xF); + (destination & bit) != 0 + } + } + + fn send_physical_ipi(&mut self, icr: ApicIcr) -> bool { + // If the target APIC ID matches the current processor, then treat this + // as a self-IPI. Otherwise, locate the target processor by APIC ID. 
+ let destination = icr.destination(); + if destination == this_cpu().get_apic_id() { + self.post_interrupt(icr.vector(), false); + false + } else { + // If the target CPU cannot be located, then simply drop the + // request. + if let Some(cpu) = PERCPU_AREAS.get(destination) { + cpu.request_ipi(icr.vector()); + true + } else { + false + } + } + } + + fn send_ipi(&mut self, icr: ApicIcr) { + let (signal_host, include_others, include_self) = match icr.destination_shorthand() { + IcrDestFmt::Dest => { + if icr.destination() == 0xFFFF_FFFF { + // This is a broadcast, so treat it as all with self. + (true, true, true) + } else { + let signal_host = if icr.destination_mode() { + self.send_logical_ipi(icr) + } else { + self.send_physical_ipi(icr) + }; + + // Any possible self-IPI was handled above as part of + // delivery to the correct destination. + (signal_host, false, false) + } + } + IcrDestFmt::OnlySelf => (false, false, true), + IcrDestFmt::AllButSelf => (true, true, false), + IcrDestFmt::AllWithSelf => (true, true, true), + }; + + if include_others { + // Enumerate all processors in the system except for the + // current CPU and indicate that an IPI has been requested. + let apic_id = this_cpu().get_apic_id(); + for cpu_ref in PERCPU_AREAS.iter() { + let cpu = cpu_ref.unwrap(); + if cpu.apic_id() != apic_id { + Self::post_ipi_one_target(cpu, icr); + } + } + } + + if include_self { + self.post_icr_interrupt(icr); + } + + if signal_host { + // Calculate an ICR value to use for a host IPI request. This will + // be a fixed interrupt on the interrupt notification vector using + // the destination format specified in the ICR value. 
+ let mut hv_icr = ApicIcr::new() + .with_vector(INT_INJ_VECTOR as u8) + .with_message_type(IcrMessageType::Fixed) + .with_destination_mode(icr.destination_mode()) + .with_destination_shorthand(icr.destination_shorthand()) + .with_destination(icr.destination()); + + // Avoid a self interrupt if the target is all-including-self, + // because the self IPI was delivered above. In the case of + // a logical cluster IPI, it is impractical to avoid the self + // interrupt, but such cases should be rare. + if hv_icr.destination_shorthand() == IcrDestFmt::AllWithSelf { + hv_icr.set_destination_shorthand(IcrDestFmt::AllButSelf); + } + + let _r = SVSM_PLATFORM.as_dyn_ref().post_irq(hv_icr.into()); + assert!(_r.is_ok()); + } + } + + pub fn read_register<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + register: u64, + ) -> Result<u64, ApicError> { + // Rewind any undelivered interrupt so it is reflected in any register + // read. + self.check_delivered_interrupts(cpu_state, caa_addr); + + match register { + APIC_REGISTER_APIC_ID => Ok(u64::from(cpu_shared.apic_id())), + APIC_REGISTER_IRR_0..=APIC_REGISTER_IRR_7 => { + let offset = register - APIC_REGISTER_IRR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.irr[index] as u64) + } + APIC_REGISTER_ISR_0..=APIC_REGISTER_ISR_7 => { + let offset = register - APIC_REGISTER_ISR_0; + Ok(self.get_isr(offset.try_into().unwrap()) as u64) + } + APIC_REGISTER_TMR_0..=APIC_REGISTER_TMR_7 => { + let offset = register - APIC_REGISTER_TMR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.tmr[index] as u64) + } + APIC_REGISTER_TPR => Ok(cpu_state.get_tpr() as u64), + APIC_REGISTER_PPR => Ok(self.get_ppr(cpu_state) as u64), + _ => Err(ApicError::ApicError), + } + } + + fn handle_icr_write(&mut self, value: u64) -> Result<(), ApicError> { + let icr = ApicIcr::from(value); + + // Verify that this message type is supported. 
+ let valid_type = match icr.message_type() { + IcrMessageType::Fixed => { + // Only asserted edge-triggered interrupts can be handled. + !icr.trigger_mode() && icr.assert() + } + IcrMessageType::Nmi => true, + _ => false, + }; + + if !valid_type { + return Err(ApicError::ApicError); + } + + self.send_ipi(icr); + + Ok(()) + } + + pub fn write_register<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + register: u64, + value: u64, + ) -> Result<(), ApicError> { + // Rewind any undelivered interrupt so it is correctly processed by + // any register write. + self.check_delivered_interrupts(cpu_state, caa_addr); + + match register { + APIC_REGISTER_TPR => { + // TPR must be an 8-bit value. + if value > 0xFF { + Err(ApicError::ApicError) + } else { + cpu_state.set_tpr((value & 0xFF) as u8); + Ok(()) + } + } + APIC_REGISTER_EOI => { + self.perform_eoi(); + Ok(()) + } + APIC_REGISTER_ICR => self.handle_icr_write(value), + APIC_REGISTER_SELF_IPI => { + if value > 0xFF { + Err(ApicError::ApicError) + } else { + self.post_interrupt((value & 0xFF) as u8, false); + Ok(()) + }
Same here, lets make it more concise: ```suggestion let value = u8::try_from(value).ok_or(ApicError::ApicError)?; self.post_interrupt(value); Ok(()) ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,850 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + pub destination_mode: bool, + pub 
delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for (i, irr) in self.irr.into_iter().enumerate().rev() { + if irr != 0 { + let bit_index = 31 - irr.leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + let new_index = self.isr_stack_index.checked_sub(1).unwrap(); + assert!(self.isr_stack.get(new_index) == Some(&irq)); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index = new_index; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + let virt_addr = caa_addr?; + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector and transfer any pending IPIs into the local + // IRR vector. + for (i, irr) in self.irr.iter_mut().enumerate() { + *irr |= cpu_shared.ipi_irr_vector(i); + } + if cpu_shared.nmi_pending() { + self.nmi_pending = true; + } + self.update_required = true; + } + + pub fn present_interrupts<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Make sure any interrupts being presented by the host have been + // consumed. + self.consume_host_interrupts(); + + // Consume any pending IPIs. + if cpu_shared.ipi_pending() { + self.consume_pending_ipis(cpu_shared); + } + + if self.update_required { + // Make sure that all previously delivered interrupts have been + // processed before attempting to process any more. + self.check_delivered_interrupts(cpu_state, caa_addr); + self.update_required = false; + + // If an NMI is pending, then present it first. + if self.nmi_pending { + cpu_state.request_nmi(); + self.nmi_pending = false; + } + + let irq = self.scan_irr(); + let current_priority = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index - 1] + } else { + 0 + }; + + // Assume no lazy EOI can be attempted unless it is recalculated + // below. + self.lazy_eoi_pending = false; + let guest_caa = Self::clear_guest_eoi_pending(caa_addr); + + // This interrupt is a candidate for delivery only if its priority + // exceeds the priority of the highest priority interrupt currently + // in service. This check does not consider TPR, because an + // interrupt lower in priority than TPR must be queued for delivery + // as soon as TPR is lowered. 
+ if (irq & 0xF0) > (current_priority & 0xF0) { + // Determine whether this interrupt can be injected + // immediately. If not, queue it for delivery when possible. + let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) { + self.interrupt_delivered = true; + + // Use of lazy EOI can safely be attempted, because the + // highest priority interrupt in service is unambiguous. + true + } else { + cpu_state.queue_interrupt(irq); + self.interrupt_queued = true; + + // A lazy EOI can only be attempted if there is no lower + // priority interrupt in service. If a lower priority + // interrupt is in service, then the lazy EOI handler + // won't know whether the lazy EOI is for the one that + // is already in service or the one that is being queued + // here. + self.isr_stack_index == 0 + }; + + // Mark this interrupt in-service. It will be recalled if + // the ISR is examined again before the interrupt is actually + // delivered. + Self::remove_vector_register(&mut self.irr, irq); + self.isr_stack[self.isr_stack_index] = irq; + self.isr_stack_index += 1; + + // Configure a lazy EOI if possible. Lazy EOI is not possible + // for level-sensitive interrupts, because an explicit EOI + // is required to acknowledge the interrupt at the source. + if try_lazy_eoi && !Self::test_vector_register(&self.tmr, irq) { + // A lazy EOI is possible only if there is no other + // interrupt pending. If another interrupt is pending, + // then an explicit EOI will be required to prompt + // delivery of the next interrupt. + if self.scan_irr() == 0 { + if let Some(calling_area) = guest_caa { + if let Ok(caa) = calling_area.read() { + if calling_area.write(caa.update_no_eoi_required(1)).is_ok() { + // Only track a pending lazy EOI if the + // calling area page could successfully be + // updated. 
+ self.lazy_eoi_pending = true; + } + } + } + } + } + } + } + } + + fn perform_host_eoi(vector: u8) { + // Errors from the host are not expected and cannot be meaningfully + // handled, so simply ignore them. + let _r = current_ghcb().specific_eoi(vector, GUEST_VMPL.try_into().unwrap()); + assert!(_r.is_ok()); + } + + pub fn perform_eoi(&mut self) { + // Pop any in-service interrupt from the stack, and schedule the APIC + // for reevaluation. + if self.isr_stack_index != 0 { + self.isr_stack_index -= 1; + let vector = self.isr_stack[self.isr_stack_index]; + if Self::test_vector_register(&self.tmr, vector) { + if Self::test_vector_register(&self.host_tmr, vector) { + Self::perform_host_eoi(vector); + Self::remove_vector_register(&mut self.host_tmr, vector); + } else { + // FIXME: should do something with locally generated + // level-sensitive interrupts. + } + Self::remove_vector_register(&mut self.tmr, vector); + } + self.update_required = true; + self.lazy_eoi_pending = false; + } + } + + fn get_isr(&self, index: usize) -> u32 { + let mut value = 0; + for isr in self.isr_stack.into_iter().take(self.isr_stack_index) { + if (usize::from(isr >> 5)) == index { + value |= 1 << (isr & 0x1F) + } + } + value + } + + fn post_interrupt(&mut self, irq: u8, level_sensitive: bool) { + // Set the appropriate bit in the IRR. Once set, signal that interrupt + // processing is required before returning to the guest. 
+ Self::insert_vector_register(&mut self.irr, irq); + if level_sensitive { + Self::insert_vector_register(&mut self.tmr, irq); + } + self.update_required = true; + } + + fn post_icr_interrupt(&mut self, icr: ApicIcr) { + if icr.message_type() == IcrMessageType::Nmi { + self.nmi_pending = true; + self.update_required = true; + } else { + self.post_interrupt(icr.vector(), false); + } + } + + fn post_ipi_one_target(cpu: &PerCpuShared, icr: ApicIcr) { + if icr.message_type() == IcrMessageType::Nmi { + cpu.request_nmi(); + } else { + cpu.request_ipi(icr.vector()); + } + } + + fn send_logical_ipi(&mut self, icr: ApicIcr) -> bool { + let mut signal = false; + + // Check whether the current CPU matches the destination. + let destination = icr.destination(); + let apic_id = this_cpu().get_apic_id(); + if Self::logical_destination_match(destination, apic_id) { + self.post_icr_interrupt(icr); + } + + // Enumerate all CPUs to see which have APIC IDs that match the + // requested destination. Skip the current CPU, since it was checked + // above. + for cpu_ref in PERCPU_AREAS.iter() { + let cpu = cpu_ref.unwrap(); + let this_apic_id = cpu.apic_id(); + if (this_apic_id != apic_id) + && Self::logical_destination_match(destination, this_apic_id) + { + Self::post_ipi_one_target(cpu, icr); + signal = true; + } + } + + signal + } + + fn logical_destination_match(destination: u32, apic_id: u32) -> bool { + // CHeck for a cluster match. + if (destination >> 16) != (apic_id >> 4) { + false + } else { + let bit = 1u32 << (apic_id & 0xF); + (destination & bit) != 0 + } + } + + fn send_physical_ipi(&mut self, icr: ApicIcr) -> bool { + // If the target APIC ID matches the current processor, then treat this + // as a self-IPI. Otherwise, locate the target processor by APIC ID. 
+ let destination = icr.destination(); + if destination == this_cpu().get_apic_id() { + self.post_interrupt(icr.vector(), false); + false + } else { + // If the target CPU cannot be located, then simply drop the + // request. + if let Some(cpu) = PERCPU_AREAS.get(destination) { + cpu.request_ipi(icr.vector()); + true + } else { + false + } + } + } + + fn send_ipi(&mut self, icr: ApicIcr) { + let (signal_host, include_others, include_self) = match icr.destination_shorthand() { + IcrDestFmt::Dest => { + if icr.destination() == 0xFFFF_FFFF { + // This is a broadcast, so treat it as all with self. + (true, true, true) + } else { + let signal_host = if icr.destination_mode() { + self.send_logical_ipi(icr) + } else { + self.send_physical_ipi(icr) + }; + + // Any possible self-IPI was handled above as part of + // delivery to the correct destination. + (signal_host, false, false) + } + } + IcrDestFmt::OnlySelf => (false, false, true), + IcrDestFmt::AllButSelf => (true, true, false), + IcrDestFmt::AllWithSelf => (true, true, true), + }; + + if include_others { + // Enumerate all processors in the system except for the + // current CPU and indicate that an IPI has been requested. + let apic_id = this_cpu().get_apic_id(); + for cpu_ref in PERCPU_AREAS.iter() { + let cpu = cpu_ref.unwrap(); + if cpu.apic_id() != apic_id { + Self::post_ipi_one_target(cpu, icr); + } + } + } + + if include_self { + self.post_icr_interrupt(icr); + } + + if signal_host { + // Calculate an ICR value to use for a host IPI request. This will + // be a fixed interrupt on the interrupt notification vector using + // the destination format specified in the ICR value. 
+ let mut hv_icr = ApicIcr::new() + .with_vector(INT_INJ_VECTOR as u8) + .with_message_type(IcrMessageType::Fixed) + .with_destination_mode(icr.destination_mode()) + .with_destination_shorthand(icr.destination_shorthand()) + .with_destination(icr.destination()); + + // Avoid a self interrupt if the target is all-including-self, + // because the self IPI was delivered above. In the case of + // a logical cluster IPI, it is impractical to avoid the self + // interrupt, but such cases should be rare. + if hv_icr.destination_shorthand() == IcrDestFmt::AllWithSelf { + hv_icr.set_destination_shorthand(IcrDestFmt::AllButSelf); + } + + let _r = SVSM_PLATFORM.as_dyn_ref().post_irq(hv_icr.into()); + assert!(_r.is_ok()); + } + } + + pub fn read_register<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + register: u64, + ) -> Result<u64, ApicError> { + // Rewind any undelivered interrupt so it is reflected in any register + // read. + self.check_delivered_interrupts(cpu_state, caa_addr); + + match register { + APIC_REGISTER_APIC_ID => Ok(u64::from(cpu_shared.apic_id())), + APIC_REGISTER_IRR_0..=APIC_REGISTER_IRR_7 => { + let offset = register - APIC_REGISTER_IRR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.irr[index] as u64) + } + APIC_REGISTER_ISR_0..=APIC_REGISTER_ISR_7 => { + let offset = register - APIC_REGISTER_ISR_0; + Ok(self.get_isr(offset.try_into().unwrap()) as u64) + } + APIC_REGISTER_TMR_0..=APIC_REGISTER_TMR_7 => { + let offset = register - APIC_REGISTER_TMR_0; + let index: usize = offset.try_into().unwrap(); + Ok(self.tmr[index] as u64) + } + APIC_REGISTER_TPR => Ok(cpu_state.get_tpr() as u64), + APIC_REGISTER_PPR => Ok(self.get_ppr(cpu_state) as u64), + _ => Err(ApicError::ApicError), + } + } + + fn handle_icr_write(&mut self, value: u64) -> Result<(), ApicError> { + let icr = ApicIcr::from(value); + + // Verify that this message type is supported. 
+ let valid_type = match icr.message_type() { + IcrMessageType::Fixed => { + // Only asserted edge-triggered interrupts can be handled. + !icr.trigger_mode() && icr.assert() + } + IcrMessageType::Nmi => true, + _ => false, + }; + + if !valid_type { + return Err(ApicError::ApicError); + } + + self.send_ipi(icr); + + Ok(()) + } + + pub fn write_register<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + register: u64, + value: u64, + ) -> Result<(), ApicError> { + // Rewind any undelivered interrupt so it is correctly processed by + // any register write. + self.check_delivered_interrupts(cpu_state, caa_addr); + + match register { + APIC_REGISTER_TPR => { + // TPR must be an 8-bit value. + if value > 0xFF { + Err(ApicError::ApicError) + } else { + cpu_state.set_tpr((value & 0xFF) as u8); + Ok(()) + } + } + APIC_REGISTER_EOI => { + self.perform_eoi(); + Ok(()) + } + APIC_REGISTER_ICR => self.handle_icr_write(value), + APIC_REGISTER_SELF_IPI => { + if value > 0xFF { + Err(ApicError::ApicError) + } else { + self.post_interrupt((value & 0xFF) as u8, false); + Ok(()) + } + } + _ => Err(ApicError::ApicError), + } + } + + pub fn configure_vector(&mut self, vector: u8, allowed: bool) { + let index = (vector >> 5) as usize; + let mask = 1 << (vector & 31); + if allowed { + self.allowed_irr[index] |= mask; + } else { + self.allowed_irr[index] &= !mask; + } + } + + fn signal_one_host_interrupt(&mut self, vector: u8, level_sensitive: bool) -> bool { + let index = (vector >> 5) as usize; + let mask = 1 << (vector & 31); + if (self.allowed_irr[index] & mask) != 0 { + self.post_interrupt(vector, level_sensitive); + true + } else { + false + } + } + + fn signal_several_interrupts(&mut self, group: usize, mut bits: u32) { + let vector = (group as u8) << 5; + while bits != 0 { + let index = 31 - bits.leading_zeros(); + bits &= !(1 << index); + self.post_interrupt(vector + index as u8, false); + } + } + + pub fn consume_host_interrupts(&mut self) { + 
let hv_doorbell = this_cpu().hv_doorbell().unwrap(); + let vmpl_event_mask = hv_doorbell.per_vmpl_events.swap(0, Ordering::Relaxed); + // Ignore events other than for the guest VMPL. + if vmpl_event_mask & (1 << (GUEST_VMPL - 1)) != 0 {
Let's return early here, e.g. ```rust // Ignore events other than for the guest VMPL. if vmpl_event_mask & (1 << (GUEST_VMPL - 1)) == 0 { return; } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,850 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + pub destination_mode: bool, + pub 
delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for (i, irr) in self.irr.into_iter().enumerate().rev() { + if irr != 0 { + let bit_index = 31 - irr.leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + let new_index = self.isr_stack_index.checked_sub(1).unwrap(); + assert!(self.isr_stack.get(new_index) == Some(&irq)); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index = new_index; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + let virt_addr = caa_addr?; + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector and transfer any pending IPIs into the local + // IRR vector. + for (i, irr) in self.irr.iter_mut().enumerate() { + *irr |= cpu_shared.ipi_irr_vector(i); + } + if cpu_shared.nmi_pending() { + self.nmi_pending = true; + } + self.update_required = true; + } + + pub fn present_interrupts<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Make sure any interrupts being presented by the host have been + // consumed. + self.consume_host_interrupts(); + + // Consume any pending IPIs. + if cpu_shared.ipi_pending() { + self.consume_pending_ipis(cpu_shared); + } + + if self.update_required { + // Make sure that all previously delivered interrupts have been + // processed before attempting to process any more. + self.check_delivered_interrupts(cpu_state, caa_addr); + self.update_required = false; + + // If an NMI is pending, then present it first. + if self.nmi_pending { + cpu_state.request_nmi(); + self.nmi_pending = false; + } + + let irq = self.scan_irr(); + let current_priority = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index - 1] + } else { + 0 + }; + + // Assume no lazy EOI can be attempted unless it is recalculated + // below. + self.lazy_eoi_pending = false; + let guest_caa = Self::clear_guest_eoi_pending(caa_addr); + + // This interrupt is a candidate for delivery only if its priority + // exceeds the priority of the highest priority interrupt currently + // in service. This check does not consider TPR, because an + // interrupt lower in priority than TPR must be queued for delivery + // as soon as TPR is lowered. + if (irq & 0xF0) > (current_priority & 0xF0) {
Let's return early here, e.g. ```rust if (irq & 0xF0) <= (current_priority & 0xF0) { return; } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -0,0 +1,850 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) Microsoft Corporation +// +// Author: Jon Lange (jlange@microsoft.com) + +use crate::address::VirtAddr; +use crate::cpu::idt::common::INT_INJ_VECTOR; +use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpuShared, PERCPU_AREAS}; +use crate::mm::GuestPtr; +use crate::platform::guest_cpu::GuestCpuState; +use crate::platform::SVSM_PLATFORM; +use crate::requests::SvsmCaa; +use crate::sev::hv_doorbell::HVExtIntStatus; +use crate::types::GUEST_VMPL; + +use bitfield_struct::bitfield; +use core::sync::atomic::Ordering; + +const APIC_REGISTER_APIC_ID: u64 = 0x802; +const APIC_REGISTER_TPR: u64 = 0x808; +const APIC_REGISTER_PPR: u64 = 0x80A; +const APIC_REGISTER_EOI: u64 = 0x80B; +const APIC_REGISTER_ISR_0: u64 = 0x810; +const APIC_REGISTER_ISR_7: u64 = 0x817; +const APIC_REGISTER_TMR_0: u64 = 0x818; +const APIC_REGISTER_TMR_7: u64 = 0x81F; +const APIC_REGISTER_IRR_0: u64 = 0x820; +const APIC_REGISTER_IRR_7: u64 = 0x827; +const APIC_REGISTER_ICR: u64 = 0x830; +const APIC_REGISTER_SELF_IPI: u64 = 0x83F; + +#[derive(Debug, PartialEq)] +enum IcrDestFmt { + Dest = 0, + OnlySelf = 1, + AllWithSelf = 2, + AllButSelf = 3, +} + +impl IcrDestFmt { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 3 => Self::AllButSelf, + 2 => Self::AllWithSelf, + 1 => Self::OnlySelf, + _ => Self::Dest, + } + } +} + +#[derive(Debug, PartialEq)] +enum IcrMessageType { + Fixed = 0, + Unknown = 3, + Nmi = 4, + Init = 5, + Sipi = 6, + ExtInt = 7, +} + +impl IcrMessageType { + const fn into_bits(self) -> u64 { + self as _ + } + const fn from_bits(value: u64) -> Self { + match value { + 7 => Self::ExtInt, + 6 => Self::Sipi, + 5 => Self::Init, + 4 => Self::Nmi, + 0 => Self::Fixed, + _ => Self::Unknown, + } + } +} + +#[bitfield(u64)] +struct ApicIcr { + pub vector: u8, + #[bits(3)] + pub message_type: IcrMessageType, + pub destination_mode: bool, + pub 
delivery_status: bool, + rsvd_13: bool, + pub assert: bool, + pub trigger_mode: bool, + #[bits(2)] + pub remote_read_status: usize, + #[bits(2)] + pub destination_shorthand: IcrDestFmt, + #[bits(12)] + rsvd_31_20: u64, + pub destination: u32, +} + +#[derive(Clone, Copy, Debug)] +pub enum ApicError { + ApicError, +} + +#[derive(Default, Clone, Copy, Debug)] +pub struct LocalApic { + irr: [u32; 8], + allowed_irr: [u32; 8], + isr_stack_index: usize, + isr_stack: [u8; 16], + tmr: [u32; 8], + host_tmr: [u32; 8], + update_required: bool, + interrupt_delivered: bool, + interrupt_queued: bool, + lazy_eoi_pending: bool, + nmi_pending: bool, +} + +impl LocalApic { + pub fn new() -> Self { + LocalApic { + irr: [0; 8], + allowed_irr: [0; 8], + isr_stack_index: 0, + isr_stack: [0; 16], + tmr: [0; 8], + host_tmr: [0; 8], + update_required: false, + interrupt_delivered: false, + interrupt_queued: false, + lazy_eoi_pending: false, + nmi_pending: false, + } + } + + fn scan_irr(&self) -> u8 { + // Scan to find the highest pending IRR vector. 
+ for (i, irr) in self.irr.into_iter().enumerate().rev() { + if irr != 0 { + let bit_index = 31 - irr.leading_zeros(); + let vector = (i as u32) * 32 + bit_index; + return vector.try_into().unwrap(); + } + } + 0 + } + + fn remove_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] &= !(1 << (irq & 31)); + } + + fn insert_vector_register(register: &mut [u32; 8], irq: u8) { + register[irq as usize >> 5] |= 1 << (irq & 31); + } + + fn test_vector_register(register: &[u32; 8], irq: u8) -> bool { + (register[irq as usize >> 5] & 1 << (irq & 31)) != 0 + } + + fn rewind_pending_interrupt(&mut self, irq: u8) { + let new_index = self.isr_stack_index.checked_sub(1).unwrap(); + assert!(self.isr_stack.get(new_index) == Some(&irq)); + Self::insert_vector_register(&mut self.irr, irq); + self.isr_stack_index = new_index; + self.update_required = true; + } + + pub fn check_delivered_interrupts<T: GuestCpuState>( + &mut self, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Check to see if a previously delivered interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_delivered { + let irq = cpu_state.check_and_clear_pending_interrupt_event(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_delivered = false; + } + + // Check to see if a previously queued interrupt is still pending. + // If so, move it back to the IRR. + if self.interrupt_queued { + let irq = cpu_state.check_and_clear_pending_virtual_interrupt(); + if irq != 0 { + self.rewind_pending_interrupt(irq); + self.lazy_eoi_pending = false; + } + self.interrupt_queued = false; + } + + // If a lazy EOI is pending, then check to see whether an EOI has been + // requested by the guest. 
Note that if a lazy EOI was dismissed + // above, the guest lazy EOI flag need not be cleared here, since + // dismissal of any interrupt above will require reprocessing of + // interrupt state prior to guest reentry, and that reprocessing will + // reset the guest lazy EOI flag. + if self.lazy_eoi_pending { + if let Some(virt_addr) = caa_addr { + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + if let Ok(caa) = calling_area.read() { + if caa.no_eoi_required == 0 { + assert!(self.isr_stack_index != 0); + self.perform_eoi(); + } + } + } + } + } + + fn get_ppr_with_tpr(&self, tpr: u8) -> u8 { + // Determine the priority of the current in-service interrupt, if any. + let ppr = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index] + } else { + 0 + }; + + // The PPR is the higher of the in-service interrupt priority and the + // task priority. + if (ppr >> 4) > (tpr >> 4) { + ppr + } else { + tpr + } + } + + fn get_ppr<T: GuestCpuState>(&self, cpu_state: &T) -> u8 { + self.get_ppr_with_tpr(cpu_state.get_tpr()) + } + + fn clear_guest_eoi_pending(caa_addr: Option<VirtAddr>) -> Option<GuestPtr<SvsmCaa>> { + let virt_addr = caa_addr?; + let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr); + // Ignore errors here, since nothing can be done if an error occurs. + if let Ok(caa) = calling_area.read() { + let _ = calling_area.write(caa.update_no_eoi_required(0)); + } + Some(calling_area) + } + + fn deliver_interrupt_immediately<T: GuestCpuState>( + &mut self, + irq: u8, + cpu_state: &mut T, + ) -> bool { + if !cpu_state.interrupts_enabled() || cpu_state.in_intr_shadow() { + false + } else { + // This interrupt can only be delivered if it is a higher priority + // than the processor's current priority. 
+ let ppr = self.get_ppr(cpu_state); + if (irq >> 4) <= (ppr >> 4) { + false + } else { + cpu_state.try_deliver_interrupt_immediately(irq) + } + } + } + + pub fn consume_pending_ipis(&mut self, cpu_shared: &PerCpuShared) { + // Scan the IPI IRR vector and transfer any pending IPIs into the local + // IRR vector. + for (i, irr) in self.irr.iter_mut().enumerate() { + *irr |= cpu_shared.ipi_irr_vector(i); + } + if cpu_shared.nmi_pending() { + self.nmi_pending = true; + } + self.update_required = true; + } + + pub fn present_interrupts<T: GuestCpuState>( + &mut self, + cpu_shared: &PerCpuShared, + cpu_state: &mut T, + caa_addr: Option<VirtAddr>, + ) { + // Make sure any interrupts being presented by the host have been + // consumed. + self.consume_host_interrupts(); + + // Consume any pending IPIs. + if cpu_shared.ipi_pending() { + self.consume_pending_ipis(cpu_shared); + } + + if self.update_required { + // Make sure that all previously delivered interrupts have been + // processed before attempting to process any more. + self.check_delivered_interrupts(cpu_state, caa_addr); + self.update_required = false; + + // If an NMI is pending, then present it first. + if self.nmi_pending { + cpu_state.request_nmi(); + self.nmi_pending = false; + } + + let irq = self.scan_irr(); + let current_priority = if self.isr_stack_index != 0 { + self.isr_stack[self.isr_stack_index - 1] + } else { + 0 + }; + + // Assume no lazy EOI can be attempted unless it is recalculated + // below. + self.lazy_eoi_pending = false; + let guest_caa = Self::clear_guest_eoi_pending(caa_addr); + + // This interrupt is a candidate for delivery only if its priority + // exceeds the priority of the highest priority interrupt currently + // in service. This check does not consider TPR, because an + // interrupt lower in priority than TPR must be queued for delivery + // as soon as TPR is lowered. 
+ if (irq & 0xF0) > (current_priority & 0xF0) { + // Determine whether this interrupt can be injected + // immediately. If not, queue it for delivery when possible. + let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) { + self.interrupt_delivered = true; + + // Use of lazy EOI can safely be attempted, because the + // highest priority interrupt in service is unambiguous. + true + } else { + cpu_state.queue_interrupt(irq); + self.interrupt_queued = true; + + // A lazy EOI can only be attempted if there is no lower + // priority interrupt in service. If a lower priority + // interrupt is in service, then the lazy EOI handler + // won't know whether the lazy EOI is for the one that + // is already in service or the one that is being queued + // here. + self.isr_stack_index == 0 + }; + + // Mark this interrupt in-service. It will be recalled if + // the ISR is examined again before the interrupt is actually + // delivered. + Self::remove_vector_register(&mut self.irr, irq); + self.isr_stack[self.isr_stack_index] = irq; + self.isr_stack_index += 1; + + // Configure a lazy EOI if possible. Lazy EOI is not possible + // for level-sensitive interrupts, because an explicit EOI + // is required to acknowledge the interrupt at the source. + if try_lazy_eoi && !Self::test_vector_register(&self.tmr, irq) { + // A lazy EOI is possible only if there is no other + // interrupt pending. If another interrupt is pending, + // then an explicit EOI will be required to prompt + // delivery of the next interrupt. + if self.scan_irr() == 0 { + if let Some(calling_area) = guest_caa { + if let Ok(caa) = calling_area.read() { + if calling_area.write(caa.update_no_eoi_required(1)).is_ok() { + // Only track a pending lazy EOI if the + // calling area page could successfully be + // updated. 
+ self.lazy_eoi_pending = true; + } + } + } + } + } + } + } + } + + fn perform_host_eoi(vector: u8) { + // Errors from the host are not expected and cannot be meaningfully + // handled, so simply ignore them. + let _r = current_ghcb().specific_eoi(vector, GUEST_VMPL.try_into().unwrap()); + assert!(_r.is_ok()); + } + + pub fn perform_eoi(&mut self) { + // Pop any in-service interrupt from the stack, and schedule the APIC + // for reevaluation. + if self.isr_stack_index != 0 {
Let's return early here, e.g. ```rust if self.isr_stack_index == 0 { return; } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -53,6 +57,10 @@ impl PerCpuInfo { cpu_shared, } } + + pub fn unwrap(&self) -> &'static PerCpuShared { + self.cpu_shared + }
Let's name this something else, it's too similar to `Option::unwrap()` or `Result::unwrap()`.
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -201,18 +214,39 @@ impl GuestVmsaRef { #[derive(Debug)] pub struct PerCpuShared { + apic_id: u32, guest_vmsa: SpinLock<GuestVmsaRef>, online: AtomicBool, + ipi_irr: [AtomicU32; 8], + ipi_pending: AtomicBool, + nmi_pending: AtomicBool, } impl PerCpuShared { - fn new() -> Self { + fn new(apic_id: u32) -> Self { PerCpuShared { + apic_id, guest_vmsa: SpinLock::new(GuestVmsaRef::new()), online: AtomicBool::new(false), + ipi_irr: [ + AtomicU32::new(0), + AtomicU32::new(0), + AtomicU32::new(0), + AtomicU32::new(0), + AtomicU32::new(0), + AtomicU32::new(0), + AtomicU32::new(0), + AtomicU32::new(0), + ],
```suggestion ipi_irr: core::array::from_fn(|_| AtomicU32::new(0)), ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -270,6 +330,8 @@ pub struct PerCpu { runqueue: RefCell<RunQueue>, /// WaitQueue for request processing request_waitqueue: RefCell<WaitQueue>, + /// Local APIC state for APIC emulation + apic: RefCell<LocalApic>,
I wonder if we should have it be `RefCell<Option<LocalApic>>`. That way the type itself conveys whether APIC emulation is enabled, so we could get rid of the `apic_emulation` field. This would also force callers to check the `Option`, avoiding future mistakes where the `apic_emulation` field is not checked. Enabling and disabling would look like this: ```rust // enable self.apic.borrow_mut().insert(LocalApic::new()); // disable self.apic.borrow_mut().take(); ``` What do you think?
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -426,6 +489,19 @@ impl PerCpu { Ok(()) } + pub fn hv_doorbell(&self) -> Option<&'static HVDoorbell> { + unsafe { + let hv_doorbell = self.hv_doorbell.get(); + if hv_doorbell.is_null() { + None + } else { + // The HV doorbell page can only ever be borrowed shared, never + // mutable, and can safely have a static lifetime. + Some(&*hv_doorbell) + } + }
[`as_ref()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref) already does the null check for us: ```suggestion // SAFETY: The HV doorbell page can only ever be borrowed shared, never // mutable, and can safely have a static lifetime. unsafe { self.hv_doorbell.get().as_ref() } ```
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -614,6 +699,69 @@ impl PerCpu { Ok(()) } + pub fn disable_apic_emulation(&self) -> Result<(), SvsmError> { + if self.apic_emulation.get() { + // APIC emulation cannot be disabled if the platform has locked + // the use of APIC emulation. + SVSM_PLATFORM.as_dyn_ref().disable_apic_emulation()?; + let mut vmsa_ref = self.guest_vmsa_ref(); + let caa_addr = vmsa_ref.caa_addr(); + let vmsa = vmsa_ref.vmsa(); + self.apic + .borrow_mut() + .disable_apic_emulation(vmsa, caa_addr); + drop(vmsa_ref); + + self.apic_emulation.set(false); + } + Ok(())
We could return early here, but also with the change I proposed around wrapping `LocalApic` in an `Option` it looks even better: ```rust pub fn disable_apic_emulation(&self) -> Result<(), SvsmError> { let Some(apic) = self.apic.borrow_mut().take() else { return Ok(()); }; // ... Ok(()) } ``` This pattern naturally also happens in some of the other PerCpu methods using the APIC.
svsm
github_2023
others
368
coconut-svsm
00xc
@@ -614,6 +699,69 @@ impl PerCpu { Ok(()) } + pub fn disable_apic_emulation(&self) -> Result<(), SvsmError> { + if self.apic_emulation.get() { + // APIC emulation cannot be disabled if the platform has locked + // the use of APIC emulation. + SVSM_PLATFORM.as_dyn_ref().disable_apic_emulation()?; + let mut vmsa_ref = self.guest_vmsa_ref(); + let caa_addr = vmsa_ref.caa_addr(); + let vmsa = vmsa_ref.vmsa(); + self.apic + .borrow_mut() + .disable_apic_emulation(vmsa, caa_addr); + drop(vmsa_ref); + + self.apic_emulation.set(false); + } + Ok(()) + } + + pub fn clear_pending_interrupts(&self) { + if self.apic_emulation.get() { + let mut vmsa_ref = self.guest_vmsa_ref(); + let caa_addr = vmsa_ref.caa_addr(); + let vmsa = vmsa_ref.vmsa(); + self.apic + .borrow_mut() + .check_delivered_interrupts(vmsa, caa_addr); + } + } + + pub fn update_apic_emulation(&self, vmsa: &mut VMSA, caa_addr: Option<VirtAddr>) { + if self.apic_emulation.get() { + self.apic + .borrow_mut() + .present_interrupts(self.shared(), vmsa, caa_addr); + } + } + + pub fn use_apic_emulation(&self) -> bool { + self.apic_emulation.get() + } + + pub fn read_apic_register(&self, register: u64) -> Result<u64, ApicError> { + let mut vmsa_ref = self.guest_vmsa_ref(); + let caa_addr = vmsa_ref.caa_addr(); + let vmsa = vmsa_ref.vmsa(); + self.apic + .borrow_mut() + .read_register(self.shared(), vmsa, caa_addr, register)
For example, here we are not performing the emulation enablement check, which looks inconsistent compared to other methods. It's fine, because we are doing it in the request handling function (`apic_read_register()`), and before that in `apic_protocol_request()`, before calling into this method, but you can see why it's an odd pattern: sometimes we check the state within the `PerCpu` methods, and sometimes from outside, and sometimes we are checking it twice! Using an `Option` would make sure we always make the check once and just once. This also happens a few lines below with `PerCpu::write_apic_register()` and `PerCpu::configure_apic_vector()`, and their respective calls from SVSM request handling functions.
svsm
github_2023
others
366
coconut-svsm
Freax13
@@ -186,20 +196,14 @@ impl IDT { unsafe { asm!("lidt (%rax)", in("rax") &desc, options(att_syntax)); } - - self } +} +impl ReadLockGuard<'static, IDT> { pub fn base_limit(&self) -> (u64, u32) { - let base = (self as *const IDT) as u64; + let base: *const IDT = core::ptr::from_ref(self);
```suggestion let base: *const IDT = self; ``` Unless your intention is to be explicit, `core::ptr::from_ref` isn't required for converting a reference to its pointer type.
svsm
github_2023
others
372
coconut-svsm
msft-jlange
@@ -45,14 +45,10 @@ impl StackUnwinder { options(att_syntax)); }; - let (top_of_init_stack, top_of_df_stack, current_stack) = unsafe { - let cpu_unsafe = &*this_cpu_unsafe(); - ( - cpu_unsafe.get_top_of_stack(), - cpu_unsafe.get_top_of_df_stack(), - cpu_unsafe.get_current_stack(), - ) - }; + let cpu = this_cpu();
This appears to be subject to reentrancy problems. The stack unwinder can be called from the panic handler, and the panic handler can be invoked while the current CPU is mutably borrowed. The reference to `this_cpu()` would create a borrow conflict resulting in a nested panic. The reason I put the stack limits into the unsafe portion of the CPU was specifically to ensure that panic handler would never panic itself, i.e. that no data references could ever result in borrow checking. I think we need to adhere to that model if we want panic handling to behave reasonably.
svsm
github_2023
others
372
coconut-svsm
msft-jlange
@@ -250,154 +249,108 @@ impl PerCpuShared { } } +const _: () = assert!(size_of::<PerCpu>() <= PAGE_SIZE); + #[derive(Debug)] -pub struct PerCpuUnsafe { +pub struct PerCpu { shared: PerCpuShared, - private: RefCell<PerCpu>, - ghcb: *mut GHCB, - hv_doorbell: *mut HVDoorbell, - init_stack: Option<VirtAddr>, + + apic_id: u32, + pgtbl: RefCell<PageTableRef>, + tss: Cell<X86Tss>, + svsm_vmsa: Cell<Option<VmsaRef>>, + reset_ip: Cell<u64>, + /// PerCpu Virtual Memory Range + vm_range: VMR,
It would be really helpful to have comments about why some of these fields are not protected with a `Cell`. While I suspect it has to do with which structures support interior mutability and can always be accessed through a shared reference, this fact is not immediately clear from reading the code here.
svsm
github_2023
others
372
coconut-svsm
msft-jlange
@@ -250,154 +249,108 @@ impl PerCpuShared { } } +const _: () = assert!(size_of::<PerCpu>() <= PAGE_SIZE); + #[derive(Debug)] -pub struct PerCpuUnsafe { +pub struct PerCpu { shared: PerCpuShared,
Is `PerCpuShared` still necessary if the entire structure is shared-only? I pushed this initially so that cross-CPU access (via shared references) was always possible even if the owning CPU held a mutable reference, but now that this is no longer possible, it seems like we no longer need a separate structure.
svsm
github_2023
others
372
coconut-svsm
msft-jlange
@@ -250,154 +249,108 @@ impl PerCpuShared { } } +const _: () = assert!(size_of::<PerCpu>() <= PAGE_SIZE); + #[derive(Debug)] -pub struct PerCpuUnsafe { +pub struct PerCpu { shared: PerCpuShared, - private: RefCell<PerCpu>, - ghcb: *mut GHCB, - hv_doorbell: *mut HVDoorbell, - init_stack: Option<VirtAddr>, + + apic_id: u32, + pgtbl: RefCell<PageTableRef>, + tss: Cell<X86Tss>, + svsm_vmsa: Cell<Option<VmsaRef>>, + reset_ip: Cell<u64>, + /// PerCpu Virtual Memory Range + vm_range: VMR, + /// Address allocator for per-cpu 4k temporary mappings + pub vrange_4k: RefCell<VirtualRange>, + /// Address allocator for per-cpu 2m temporary mappings + pub vrange_2m: RefCell<VirtualRange>, + /// Task list that has been assigned for scheduling on this CPU + runqueue: RefCell<RunQueue>, + /// WaitQueue for request processing + request_waitqueue: RefCell<WaitQueue>, + + ghcb: Cell<Option<&'static GHCB>>, + hv_doorbell: Cell<*const HVDoorbell>, + init_stack: Cell<Option<VirtAddr>>, ist: IstStacks, - /// Stack boundaries of the currently running task. This is stored in - /// [PerCpuUnsafe] because it needs lockless read access. - current_stack: MemoryRegion<VirtAddr>, + /// Stack boundaries of the currently running task. 
+ current_stack: Cell<MemoryRegion<VirtAddr>>, } -impl PerCpuUnsafe { - pub fn new(apic_id: u32, cpu_unsafe_ptr: *const PerCpuUnsafe) -> Self { +impl PerCpu { + fn new(apic_id: u32) -> Self { Self { - private: RefCell::new(PerCpu::new(apic_id, cpu_unsafe_ptr)), + apic_id, + pgtbl: RefCell::new(PageTableRef::unset()), + tss: Cell::new(X86Tss::new()), + svsm_vmsa: Cell::new(None), + reset_ip: Cell::new(0xffff_fff0), + vm_range: VMR::new(SVSM_PERCPU_BASE, SVSM_PERCPU_END, PTEntryFlags::GLOBAL), + vrange_4k: RefCell::new(VirtualRange::new()), + vrange_2m: RefCell::new(VirtualRange::new()), + runqueue: RefCell::new(RunQueue::new()), + request_waitqueue: RefCell::new(WaitQueue::new()), + shared: PerCpuShared::new(), - ghcb: ptr::null_mut(), - hv_doorbell: ptr::null_mut(), - init_stack: None, + ghcb: Cell::new(None), + hv_doorbell: Cell::new(ptr::null()), + init_stack: Cell::new(None), ist: IstStacks::new(), - current_stack: MemoryRegion::new(VirtAddr::null(), 0), + current_stack: Cell::new(MemoryRegion::new(VirtAddr::null(), 0)), } } - pub fn alloc(apic_id: u32) -> Result<*mut PerCpuUnsafe, SvsmError> { + pub fn alloc(apic_id: u32) -> Result<&'static Self, SvsmError> { let vaddr = allocate_zeroed_page()?; + let percpu_ptr = vaddr.as_mut_ptr::<Self>(); unsafe { - // Within each CPU state page, the first portion is the private - // mutable state and remainder is the shared state. 
- let unsafe_size = size_of::<PerCpuUnsafe>(); - let private_size = size_of::<PerCpu>(); - if unsafe_size + private_size > PAGE_SIZE { - panic!("Per-CPU data is larger than one page!"); - } - let percpu_unsafe = vaddr.as_mut_ptr::<PerCpuUnsafe>(); - - (*percpu_unsafe) = PerCpuUnsafe::new(apic_id, percpu_unsafe); - - PERCPU_AREAS.push(PerCpuInfo::new(apic_id, &(*percpu_unsafe).shared)); - Ok(percpu_unsafe) + (*percpu_ptr) = Self::new(apic_id); + let percpu = &*percpu_ptr; + PERCPU_AREAS.push(PerCpuInfo::new(apic_id, &percpu.shared)); + Ok(percpu) } } pub fn shared(&self) -> &PerCpuShared { &self.shared } - pub fn cpu(&self) -> Ref<'_, PerCpu> { - self.private.borrow() - } - - pub fn cpu_mut(&self) -> RefMut<'_, PerCpu> { - self.private.borrow_mut() - } - - pub fn setup_ghcb(&mut self) -> Result<(), SvsmError> { - let ghcb_page = allocate_zeroed_page().expect("Failed to allocate GHCB page"); + pub fn setup_ghcb(&self) -> Result<(), SvsmError> { + let ghcb_page = allocate_zeroed_page()?; if let Err(e) = GHCB::init(ghcb_page) { free_page(ghcb_page); return Err(e); }; - self.ghcb = ghcb_page.as_mut_ptr(); + let ghcb = unsafe { &*ghcb_page.as_ptr() }; + self.ghcb.set(Some(ghcb)); Ok(()) } - pub fn ghcb_unsafe(&self) -> *mut GHCB { - self.ghcb + fn ghcb(&self) -> Option<&'static GHCB> { + self.ghcb.get() } - pub fn hv_doorbell_unsafe(&self) -> *mut HVDoorbell { - self.hv_doorbell + pub fn hv_doorbell_unsafe(&self) -> *const HVDoorbell {
Can we reconsider whether to keep this unsafe accessor? This is written this way largely to follow the `PerCpuUnsafe` pattern, but the `HVDoorbell` structure is marked as `const` (and handles its own interior mutability through `Sync`-safe structures like atomics) so changing this to return a `&'static HVDoorbell` would probably be an improvement.
svsm
github_2023
others
372
coconut-svsm
msft-jlange
@@ -250,154 +249,108 @@ impl PerCpuShared { } } +const _: () = assert!(size_of::<PerCpu>() <= PAGE_SIZE); + #[derive(Debug)] -pub struct PerCpuUnsafe { +pub struct PerCpu { shared: PerCpuShared, - private: RefCell<PerCpu>, - ghcb: *mut GHCB, - hv_doorbell: *mut HVDoorbell, - init_stack: Option<VirtAddr>, + + apic_id: u32, + pgtbl: RefCell<PageTableRef>, + tss: Cell<X86Tss>, + svsm_vmsa: Cell<Option<VmsaRef>>,
Does this actually need a `Cell`? I believe this is only ever written to while the CPU is being constructed, at which point a mutable reference should be safe (because it cannot possibly be referenced by anyone other than the constructing code). Once the CPU is constructed, and becomes visible in multiple places, this should just be constant and should not require interior mutability.
svsm
github_2023
others
372
coconut-svsm
msft-jlange
@@ -16,27 +15,22 @@ use crate::utils::immut_after_init::immut_after_init_set_multithreaded; fn start_cpu(platform: &dyn SvsmPlatform, apic_id: u32, vtom: u64) { let start_rip: u64 = (start_ap as *const u8) as u64; - let mut percpu = PerCpu::alloc(apic_id).expect("Failed to allocate AP per-cpu data");
I'm not convinced this is the right approach. During construction, when the CPU is not visible to other threads, it seems perfectly reasonable for this reference to be mutable to facilitate construction. The existing code makes this unnecessarily messy by having `PerCpu::alloc()` take responsibility for inserting the newly constructed CPU into the `PERCPU_AREAS` table, which I think is not appropriate. It would be much better to define an explicit method like `PerCpu::insert(cpu: PerCpu)` which would insert the CPU into the global table, and which takes an actual `PerCpu` (not a reference) to force a move to invalidate any reference that the caller has prior to insertion. This pattern maintains reference safety without introducing interior mutability on fields that really should be constant following construction. That said, I'm not certain this will work because the `PerCpu` allocation requires specific memory allocation attributes and therefore the move may not be compatible, but I still think we could come up with something (like a `PerCpuConstructor` or something that holds the actual reference) that would permit mutability during construction and only during construction.
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -36,6 +39,23 @@ impl<const T: usize> FixedString<T> { pub fn length(&self) -> usize { self.len } + + pub fn as_str(&self) -> String { + let mut terminator = false; + let s: String = self + .data + .iter() + .filter(|c| { + if terminator || **c == '\0' { + terminator = true; + false + } else { + true + } + }) + .collect(); + s + }
Actually `FixedString` implements `fmt::Display`, which automatically includes an implementation for `to_string()`, so this method is not needed: https://doc.rust-lang.org/std/fmt/trait.Display.html
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -1489,26 +1498,32 @@ impl Elf64LoadSegments { /// /// # Parameters /// - `segment`: An [`Elf64AddrRange`] representing the address range of the segment to insert. + /// - `file_range`: An [`Elf64FileRange`] representing the range of the segment being inserted. /// - `phdr_index`: An [`Elf64Half`] representing the program header index associated with /// the segment. /// /// # Returns /// Returns [`Ok`] if the insertion is successful and there is no overlap with existing - fn try_insert(&mut self, segment: Elf64AddrRange, phdr_index: Elf64Half) -> Result<(), ()> { + fn try_insert( + &mut self, + segment: Elf64AddrRange, + file_range: Elf64FileRange, + phdr_index: Elf64Half, + ) -> Result<(), ()> {
This is breaking tests I think
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -35,8 +35,10 @@ pub enum SvsmError { Acpi, // Errors from file systems FileSystem(FsError), - // Task management errors, + // Task management errors Task(TaskError), // Errors from #VC handler Vc(VcError), + // Errors related to modules
```suggestion // Errors related to loadable modules ```
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Roy Hopkins <rhopkins@suse.de> + +extern crate alloc; + +use crate::address::VirtAddr; +use crate::cpu::percpu::this_cpu_mut; +use crate::elf; +use crate::error::SvsmError; +use crate::fs::{list_dir, open}; +use crate::mm::vm::{Mapping, VMFileMapping, VMFileMappingPermission}; +use crate::task::{create_task_for_module, TaskNode}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[derive(Debug)] +pub struct ModuleLoader { + pub modules: Vec<Module>, +} + +impl ModuleLoader { + pub fn enumerate() -> Result<Self, SvsmError> { + let mut modules: Vec<Module> = Vec::new(); + + let module_files = list_dir("/modules")?; + for module in module_files { + let path = String::from("/modules/") + &module.as_str(); + // Each module is an ELF file + let module = Module::load(path.as_str()); + match module { + Ok(mut m) => { + create_task_for_module(&mut m, 0, None)?; + modules.push(m); + log::info!("Module {} loaded ok", path); + } + Err(_) => log::info!("Module {} load failed", path), + }
```suggestion let Ok(mut m) = Module::load(path.as_str()) else { log::info!("Module {} load failed", path); continue; }; create_task_for_module(&mut m, 0, None)?; modules.push(m); log::info!("Module {} loaded ok", path); ```
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Roy Hopkins <rhopkins@suse.de> + +extern crate alloc; + +use crate::address::VirtAddr; +use crate::cpu::percpu::this_cpu_mut; +use crate::elf; +use crate::error::SvsmError; +use crate::fs::{list_dir, open}; +use crate::mm::vm::{Mapping, VMFileMapping, VMFileMappingPermission}; +use crate::task::{create_task_for_module, TaskNode}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[derive(Debug)] +pub struct ModuleLoader { + pub modules: Vec<Module>, +} + +impl ModuleLoader { + pub fn enumerate() -> Result<Self, SvsmError> { + let mut modules: Vec<Module> = Vec::new(); + + let module_files = list_dir("/modules")?; + for module in module_files { + let path = String::from("/modules/") + &module.as_str(); + // Each module is an ELF file + let module = Module::load(path.as_str()); + match module { + Ok(mut m) => { + create_task_for_module(&mut m, 0, None)?; + modules.push(m); + log::info!("Module {} loaded ok", path); + } + Err(_) => log::info!("Module {} load failed", path), + } + } + Ok(Self { modules }) + } +} + +#[derive(Debug)] +pub struct Module { + file_segments: Vec<(VirtAddr, Arc<Mapping>)>, + entry_point: extern "C" fn(), + task_node: Option<Arc<TaskNode>>, +} + +struct SegmentInfo {
I think we can derive Clone, Copy and Debug here
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Roy Hopkins <rhopkins@suse.de> + +extern crate alloc; + +use crate::address::VirtAddr; +use crate::cpu::percpu::this_cpu_mut; +use crate::elf; +use crate::error::SvsmError; +use crate::fs::{list_dir, open}; +use crate::mm::vm::{Mapping, VMFileMapping, VMFileMappingPermission}; +use crate::task::{create_task_for_module, TaskNode}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[derive(Debug)] +pub struct ModuleLoader { + pub modules: Vec<Module>, +} + +impl ModuleLoader { + pub fn enumerate() -> Result<Self, SvsmError> { + let mut modules: Vec<Module> = Vec::new(); + + let module_files = list_dir("/modules")?; + for module in module_files { + let path = String::from("/modules/") + &module.as_str(); + // Each module is an ELF file + let module = Module::load(path.as_str()); + match module { + Ok(mut m) => { + create_task_for_module(&mut m, 0, None)?; + modules.push(m); + log::info!("Module {} loaded ok", path); + } + Err(_) => log::info!("Module {} load failed", path), + } + } + Ok(Self { modules }) + } +} + +#[derive(Debug)] +pub struct Module { + file_segments: Vec<(VirtAddr, Arc<Mapping>)>, + entry_point: extern "C" fn(), + task_node: Option<Arc<TaskNode>>, +} + +struct SegmentInfo { + vaddr: VirtAddr, + file_offset: usize, + size: usize, + flags: VMFileMappingPermission, +} + +impl Module { + pub fn entry_point(&self) -> extern "C" fn() { + self.entry_point + } + + pub fn assign(&mut self, task_node: Arc<TaskNode>) -> Result<(), SvsmError> { + self.task_node = Some(task_node.clone()); + let mut task = task_node.task.lock_write();
This avoids a clone of the `Arc`: ```suggestion let task_node = self.task_node.insert(task_node); let mut task = task_node.task.lock_write(); ```
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Roy Hopkins <rhopkins@suse.de> + +extern crate alloc; + +use crate::address::VirtAddr; +use crate::cpu::percpu::this_cpu_mut; +use crate::elf; +use crate::error::SvsmError; +use crate::fs::{list_dir, open}; +use crate::mm::vm::{Mapping, VMFileMapping, VMFileMappingPermission}; +use crate::task::{create_task_for_module, TaskNode}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[derive(Debug)] +pub struct ModuleLoader { + pub modules: Vec<Module>, +} + +impl ModuleLoader { + pub fn enumerate() -> Result<Self, SvsmError> { + let mut modules: Vec<Module> = Vec::new(); + + let module_files = list_dir("/modules")?; + for module in module_files { + let path = String::from("/modules/") + &module.as_str(); + // Each module is an ELF file + let module = Module::load(path.as_str()); + match module { + Ok(mut m) => { + create_task_for_module(&mut m, 0, None)?; + modules.push(m); + log::info!("Module {} loaded ok", path); + } + Err(_) => log::info!("Module {} load failed", path), + } + } + Ok(Self { modules }) + } +} + +#[derive(Debug)] +pub struct Module { + file_segments: Vec<(VirtAddr, Arc<Mapping>)>, + entry_point: extern "C" fn(), + task_node: Option<Arc<TaskNode>>, +} + +struct SegmentInfo { + vaddr: VirtAddr, + file_offset: usize, + size: usize, + flags: VMFileMappingPermission, +} + +impl Module { + pub fn entry_point(&self) -> extern "C" fn() { + self.entry_point + } + + pub fn assign(&mut self, task_node: Arc<TaskNode>) -> Result<(), SvsmError> { + self.task_node = Some(task_node.clone()); + let mut task = task_node.task.lock_write(); + for (vaddr, segment) in &self.file_segments { + task.vmr_user().insert_at(*vaddr, segment.clone())?; + } + Ok(()) + } + + fn get_segment_info(path: &str) -> Result<(extern "C" fn(), Vec<SegmentInfo>), SvsmError> { + // Temporarily map the file's physical memory into the percpu virtual 
address space + let file = open(path)?; + let file_size = file.size(); + let file_mapping = Arc::new(Mapping::new(VMFileMapping::new( + file, + 0, + file_size, + crate::mm::vm::VMFileMappingPermission::Read, + )?)); + let mapping = this_cpu_mut().new_mapping(file_mapping)?; + + let buf = + unsafe { core::slice::from_raw_parts_mut(mapping.virt_addr().as_mut_ptr(), file_size) }; + + let elf = match elf::Elf64File::read(buf) { + Ok(elf) => elf, + Err(_) => return Err(SvsmError::Module), + }; + let default_base = elf.default_base(); + let entry_point = unsafe { core::mem::transmute(elf.get_entry(default_base) as *const ()) };
I think this deserves a comment explaining the conversion.
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Roy Hopkins <rhopkins@suse.de> + +extern crate alloc; + +use crate::address::VirtAddr; +use crate::cpu::percpu::this_cpu_mut; +use crate::elf; +use crate::error::SvsmError; +use crate::fs::{list_dir, open}; +use crate::mm::vm::{Mapping, VMFileMapping, VMFileMappingPermission}; +use crate::task::{create_task_for_module, TaskNode}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[derive(Debug)] +pub struct ModuleLoader { + pub modules: Vec<Module>, +} + +impl ModuleLoader { + pub fn enumerate() -> Result<Self, SvsmError> { + let mut modules: Vec<Module> = Vec::new(); + + let module_files = list_dir("/modules")?; + for module in module_files { + let path = String::from("/modules/") + &module.as_str(); + // Each module is an ELF file + let module = Module::load(path.as_str()); + match module { + Ok(mut m) => { + create_task_for_module(&mut m, 0, None)?; + modules.push(m); + log::info!("Module {} loaded ok", path); + } + Err(_) => log::info!("Module {} load failed", path), + } + } + Ok(Self { modules }) + } +} + +#[derive(Debug)] +pub struct Module { + file_segments: Vec<(VirtAddr, Arc<Mapping>)>, + entry_point: extern "C" fn(), + task_node: Option<Arc<TaskNode>>, +} + +struct SegmentInfo { + vaddr: VirtAddr, + file_offset: usize, + size: usize, + flags: VMFileMappingPermission, +} + +impl Module { + pub fn entry_point(&self) -> extern "C" fn() { + self.entry_point + } + + pub fn assign(&mut self, task_node: Arc<TaskNode>) -> Result<(), SvsmError> { + self.task_node = Some(task_node.clone()); + let mut task = task_node.task.lock_write(); + for (vaddr, segment) in &self.file_segments { + task.vmr_user().insert_at(*vaddr, segment.clone())?; + } + Ok(()) + } + + fn get_segment_info(path: &str) -> Result<(extern "C" fn(), Vec<SegmentInfo>), SvsmError> { + // Temporarily map the file's physical memory into the percpu virtual 
address space + let file = open(path)?; + let file_size = file.size(); + let file_mapping = Arc::new(Mapping::new(VMFileMapping::new( + file, + 0, + file_size, + crate::mm::vm::VMFileMappingPermission::Read, + )?)); + let mapping = this_cpu_mut().new_mapping(file_mapping)?; + + let buf = + unsafe { core::slice::from_raw_parts_mut(mapping.virt_addr().as_mut_ptr(), file_size) }; + + let elf = match elf::Elf64File::read(buf) { + Ok(elf) => elf, + Err(_) => return Err(SvsmError::Module), + }; + let default_base = elf.default_base(); + let entry_point = unsafe { core::mem::transmute(elf.get_entry(default_base) as *const ()) }; + + let mut info = Vec::<SegmentInfo>::new();
Nit: the compiler can already infer the `SegmentInfo` generic.
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Roy Hopkins <rhopkins@suse.de> + +extern crate alloc; + +use crate::address::VirtAddr; +use crate::cpu::percpu::this_cpu_mut; +use crate::elf; +use crate::error::SvsmError; +use crate::fs::{list_dir, open}; +use crate::mm::vm::{Mapping, VMFileMapping, VMFileMappingPermission}; +use crate::task::{create_task_for_module, TaskNode}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[derive(Debug)] +pub struct ModuleLoader { + pub modules: Vec<Module>, +} + +impl ModuleLoader { + pub fn enumerate() -> Result<Self, SvsmError> { + let mut modules: Vec<Module> = Vec::new(); + + let module_files = list_dir("/modules")?; + for module in module_files { + let path = String::from("/modules/") + &module.as_str(); + // Each module is an ELF file + let module = Module::load(path.as_str()); + match module { + Ok(mut m) => { + create_task_for_module(&mut m, 0, None)?; + modules.push(m); + log::info!("Module {} loaded ok", path); + } + Err(_) => log::info!("Module {} load failed", path), + } + } + Ok(Self { modules }) + } +} + +#[derive(Debug)] +pub struct Module { + file_segments: Vec<(VirtAddr, Arc<Mapping>)>, + entry_point: extern "C" fn(), + task_node: Option<Arc<TaskNode>>, +} + +struct SegmentInfo { + vaddr: VirtAddr, + file_offset: usize, + size: usize, + flags: VMFileMappingPermission, +} + +impl Module { + pub fn entry_point(&self) -> extern "C" fn() { + self.entry_point + } + + pub fn assign(&mut self, task_node: Arc<TaskNode>) -> Result<(), SvsmError> { + self.task_node = Some(task_node.clone()); + let mut task = task_node.task.lock_write(); + for (vaddr, segment) in &self.file_segments { + task.vmr_user().insert_at(*vaddr, segment.clone())?; + } + Ok(()) + } + + fn get_segment_info(path: &str) -> Result<(extern "C" fn(), Vec<SegmentInfo>), SvsmError> { + // Temporarily map the file's physical memory into the percpu virtual 
address space + let file = open(path)?; + let file_size = file.size(); + let file_mapping = Arc::new(Mapping::new(VMFileMapping::new( + file, + 0, + file_size, + crate::mm::vm::VMFileMappingPermission::Read, + )?)); + let mapping = this_cpu_mut().new_mapping(file_mapping)?; + + let buf = + unsafe { core::slice::from_raw_parts_mut(mapping.virt_addr().as_mut_ptr(), file_size) }; + + let elf = match elf::Elf64File::read(buf) { + Ok(elf) => elf, + Err(_) => return Err(SvsmError::Module), + }; + let default_base = elf.default_base(); + let entry_point = unsafe { core::mem::transmute(elf.get_entry(default_base) as *const ()) }; + + let mut info = Vec::<SegmentInfo>::new(); + + // Setup the pagetable for the virtual memory ranges described in the file + for segment in elf.image_load_segment_iter(default_base) { + let vaddr_start = VirtAddr::from(segment.vaddr_range.vaddr_begin); + let flags = if segment.flags.contains(elf::Elf64PhdrFlags::EXECUTE) { + VMFileMappingPermission::Execute + } else if segment.flags.contains(elf::Elf64PhdrFlags::WRITE) { + VMFileMappingPermission::Write + } else { + VMFileMappingPermission::Read + }; + + info.push(SegmentInfo { + vaddr: vaddr_start, + file_offset: segment.file_range.offset_begin, + size: segment.file_range.offset_end - segment.file_range.offset_begin, + flags, + }); + } + Ok((entry_point, info)) + } + + fn load(path: &str) -> Result<Self, SvsmError> { + let (entry_point, segments) = Module::get_segment_info(path)?; + let mut file_segments = Vec::<(VirtAddr, Arc<Mapping>)>::new();
Same here, the compiler can infer the generic.
svsm
github_2023
others
158
coconut-svsm
00xc
@@ -260,6 +279,28 @@ impl Task { self.vm_kernel_range.handle_page_fault(vaddr, write) } + pub fn vmr_user(&mut self) -> &mut VMR { + &mut self.vm_user_range + } + + pub fn virtual_alloc( + &mut self, + size_bytes: usize, + _alignment: usize, + ) -> Result<VirtAddr, SvsmError> { + // Each bit in our bitmap represents a 4K page + if (size_bytes & (PAGE_SIZE - 1)) != 0 { + return Err(SvsmError::Mem); + } + let _page_count = size_bytes >> PAGE_SHIFT; + // TODO: Implement virtual_alloc + Err(SvsmError::Mem) + } + + pub fn virtual_free(&mut self, _vaddr: VirtAddr, _size_bytes: usize) { + // TODO: Implement virtual_free
```suggestion todo!("Implement virtual_free"); ```
svsm
github_2023
others
377
coconut-svsm
msft-jlange
@@ -163,7 +165,8 @@ pub fn sev_status_verify() { | SEVStatusFlags::REST_INJ | SEVStatusFlags::PREV_HOST_IBS | SEVStatusFlags::BTB_ISOLATION - | SEVStatusFlags::VMSA_REG_PROT; + | SEVStatusFlags::VMSA_REG_PROT + | SEVStatusFlags::SMT_PROT;
Enabling register protection will cause the guest VMSA contents to be scrambled such that they are unusable without significant extra code, which I don't see as part of this PR. It is not safe to declare this feature as supported without having such code in the SVSM or else nothing will work.
svsm
github_2023
others
377
coconut-svsm
stefano-garzarella
@@ -78,7 +78,17 @@ pub fn init_svsm_vmsa(vmsa: &mut VMSA, vtom: u64) { vmsa.vmpl = 0; vmsa.vtom = vtom; - vmsa.sev_features = sev_flags().as_sev_features(); + let sev_status = sev_flags(); + + if sev_status.contains(SEVStatusFlags::VMSA_REG_PROT) { + let nonce = rdrand::RdRand::new()
IIRC when I tried `rdrand/rdseed` from that crate (some months ago) I had an issue with SVSM, since they use `cpuid` to check if the instruction is supported, but in SVSM we didn't support `cpuid`, so I added a `new_unchecked()` API: https://github.com/nagisa/rust_rdrand/pull/21 Is `rdrand::RdRand::new()` working well now?
svsm
github_2023
others
364
coconut-svsm
p4zuu
@@ -384,28 +384,24 @@ impl GHCB { Ok(()) } - fn write_buffer<T>(&mut self, data: &T, offset: isize) -> Result<(), GhcbError> + fn write_buffer<T>(&mut self, data: &T, offset: usize) -> Result<(), GhcbError> where - T: Sized, + T: Copy, { - let size: isize = mem::size_of::<T>() as isize; - - if offset < 0 || offset + size > (GHCB_BUFFER_SIZE as isize) { + offset + .checked_add(mem::size_of::<T>()) + .filter(|end| *end <= GHCB_BUFFER_SIZE) + .ok_or(GhcbError::InvalidOffset)?; + + // SAFETY: we have verified that the offset is within bounds and does + // not overflow + let dst = unsafe { self.buffer.as_mut_ptr().add(offset) }; + if dst.align_offset(mem::align_of::<T>()) != 0 { return Err(GhcbError::InvalidOffset); } - unsafe { - let dst = self - .buffer - .as_mut_ptr() - .cast::<u8>() - .offset(offset) - .cast::<T>(); - let src = data as *const T; - - ptr::copy_nonoverlapping(src, dst, 1); - } - + // SAFETY: we have verified the pointer is aligned and within bounds. + unsafe { dst.cast::<T>().copy_from_nonoverlapping(data, 1) }
Would it make sense to also check the alignment of `data`? In other words, could the caller trick this to give something not T-aligned?
svsm
github_2023
others
358
coconut-svsm
00xc
@@ -0,0 +1,210 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2024 Intel Corporation +// +// Author: Chuanxiao Dong <chuanxiao.dong@intel.com> + +use super::decode::OpCodeBytes; +use bitflags::bitflags; + +bitflags! { + #[derive(Clone, Copy, Debug, Default, PartialEq)] + pub struct OpCodeFlags: u64 { + // Immediate operand with decoded size + const IMM = 1 << 0; + // U8 immediate operand + const IMM8 = 1 << 1; + // No need to decode ModRm + const NO_MODRM = 1 << 2; + // Operand size is one byte + const BYTE_OP = 1 << 3; + // Operand size is two byte + const WORD_OP = 1 << 4; + // Doesn't have an operand + const OP_NONE = 1 << 5; + // Need to decode Moffset + const MOFFSET = 1 << 6; + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum OpCodeClass { + Cpuid, + Group7, + Group7Rm7, + In, + Out, + Rdmsr, + Rdtsc, + Rdtscp, + TwoByte, + Wrmsr, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct OpCodeDesc {
Please add some documentation for pub items like these
svsm
github_2023
others
358
coconut-svsm
00xc
@@ -0,0 +1,377 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Thomas Leroy <tleroy@suse.de> + +use super::decode::DecodedInsnCtx; +use super::{InsnError, InsnMachineCtx}; +use crate::types::Bytes; + +/// An immediate value in an instruction +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Immediate { + U8(u8), + U16(u16), + U32(u32), +} + +/// A register in an instruction +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Register { + Rax, + Rcx, + Rdx, + Rbx, + Rsp, + Rbp, + Rsi, + Rdi, + R8, + R9, + R10, + R11, + R12, + R13, + R14, + R15, + Rip, +} + +/// A Segment register in instruction +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum SegRegister { + CS, + SS, + DS, + ES, + FS, + GS, +} + +/// An operand in an instruction, which might be a register or an immediate. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Operand { + Reg(Register), + Imm(Immediate), +} + +impl Operand { + #[inline] + pub const fn rdx() -> Self { + Self::Reg(Register::Rdx) + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum DecodedInsn { + Cpuid, + In(Operand, Bytes), + Out(Operand, Bytes), + Wrmsr, + Rdmsr, + Rdtsc, + Rdtscp, +} + +pub const MAX_INSN_SIZE: usize = 15; + +/// A view of an x86 instruction. +#[derive(Default, Debug, Copy, Clone, PartialEq)] +pub struct Instruction([u8; MAX_INSN_SIZE]); + +impl Instruction { + pub const fn new(bytes: [u8; MAX_INSN_SIZE]) -> Self { + Self(bytes) + } + + /// Decode the instruction with the given InsnMachineCtx. + /// + /// # Returns + /// + /// A [`DecodedInsnCtx`] if the instruction is supported, or an [`InsnError`] otherwise. 
+ pub fn decode(&self, mctx: &dyn InsnMachineCtx) -> Result<DecodedInsnCtx, InsnError> { + DecodedInsnCtx::new(&self.0, mctx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cpu::control_regs::{CR0Flags, CR4Flags}; + use crate::cpu::efer::EFERFlags; + + #[derive(Debug)] + struct TestCtx; + + impl InsnMachineCtx for TestCtx { + fn read_efer(&self) -> u64 { + EFERFlags::LMA.bits() + } + + fn read_seg(&self, seg: SegRegister) -> u64 { + match seg { + SegRegister::CS => 0x00af9a000000ffffu64, + _ => 0x00cf92000000ffffu64, + } + } + + fn read_cr0(&self) -> u64 { + CR0Flags::PE.bits() + } + + fn read_cr4(&self) -> u64 { + CR4Flags::LA57.bits() + } + }
Could you move this outside the test module and gate it behind `#[cfg(any(test, fuzzing))]`? This way if you make it `pub` we should be able to import it from the fuzzing harness.
svsm
github_2023
others
358
coconut-svsm
00xc
@@ -0,0 +1,377 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2022-2023 SUSE LLC +// +// Author: Thomas Leroy <tleroy@suse.de> + +use super::decode::DecodedInsnCtx; +use super::{InsnError, InsnMachineCtx}; +use crate::types::Bytes; + +/// An immediate value in an instruction +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Immediate { + U8(u8), + U16(u16), + U32(u32), +} + +/// A register in an instruction +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Register { + Rax, + Rcx, + Rdx, + Rbx, + Rsp, + Rbp, + Rsi, + Rdi, + R8, + R9, + R10, + R11, + R12, + R13, + R14, + R15, + Rip, +} + +/// A Segment register in instruction +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum SegRegister { + CS, + SS, + DS, + ES, + FS, + GS, +} + +/// An operand in an instruction, which might be a register or an immediate. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Operand { + Reg(Register), + Imm(Immediate), +} + +impl Operand { + #[inline] + pub const fn rdx() -> Self { + Self::Reg(Register::Rdx) + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum DecodedInsn { + Cpuid, + In(Operand, Bytes), + Out(Operand, Bytes), + Wrmsr, + Rdmsr, + Rdtsc, + Rdtscp, +} + +pub const MAX_INSN_SIZE: usize = 15; + +/// A view of an x86 instruction. +#[derive(Default, Debug, Copy, Clone, PartialEq)] +pub struct Instruction([u8; MAX_INSN_SIZE]); + +impl Instruction { + pub const fn new(bytes: [u8; MAX_INSN_SIZE]) -> Self { + Self(bytes) + } + + /// Decode the instruction with the given InsnMachineCtx. + /// + /// # Returns + /// + /// A [`DecodedInsnCtx`] if the instruction is supported, or an [`InsnError`] otherwise. + pub fn decode(&self, mctx: &dyn InsnMachineCtx) -> Result<DecodedInsnCtx, InsnError> { + DecodedInsnCtx::new(&self.0, mctx) + }
Any reason we're not using static dispatch here? ```diff diff --git a/kernel/src/insn_decode/decode.rs b/kernel/src/insn_decode/decode.rs index 6097fef..bd3c871 100644 --- a/kernel/src/insn_decode/decode.rs +++ b/kernel/src/insn_decode/decode.rs @@ -133,7 +133,7 @@ impl CpuMode { } } -fn get_cpu_mode(mctx: &dyn InsnMachineCtx) -> CpuMode { +fn get_cpu_mode<I: InsnMachineCtx>(mctx: &I) -> CpuMode { if (mctx.read_efer() & EFERFlags::LMA.bits()) != 0 { // EFER.LMA = 1 if (mctx.read_seg(SegRegister::CS) & SegDescAttrFlags::L.bits()) != 0 { @@ -339,9 +339,9 @@ pub struct DecodedInsnCtx { } impl DecodedInsnCtx { - pub(super) fn new( + pub(super) fn new<I: InsnMachineCtx>( bytes: &[u8; MAX_INSN_SIZE], - mctx: &dyn InsnMachineCtx, + mctx: &I, ) -> Result<Self, InsnError> { let mut insn_ctx = Self { cpu_mode: get_cpu_mode(mctx), @@ -359,10 +359,10 @@ impl DecodedInsnCtx { self.insn_len } - fn decode( + fn decode<I: InsnMachineCtx>( &mut self, bytes: &[u8; MAX_INSN_SIZE], - mctx: &dyn InsnMachineCtx, + mctx: &I, ) -> Result<(), InsnError> { self.decode_prefixes(bytes, mctx) .and_then(|insn| self.decode_opcode(insn)) @@ -452,10 +452,10 @@ impl DecodedInsnCtx { }; } - fn decode_prefixes( + fn decode_prefixes<I: InsnMachineCtx>( &mut self, bytes: &[u8; MAX_INSN_SIZE], - mctx: &dyn InsnMachineCtx, + mctx: &I, ) -> Result<OpCodeBytes, InsnError> { let mut insn = PrefixBytes(InsnBytes::new(*bytes)); for _ in 0..PREFIX_SIZE { diff --git a/kernel/src/insn_decode/insn.rs b/kernel/src/insn_decode/insn.rs index 54f1273..24429a9 100644 --- a/kernel/src/insn_decode/insn.rs +++ b/kernel/src/insn_decode/insn.rs @@ -90,7 +90,7 @@ impl Instruction { /// # Returns /// /// A [`DecodedInsnCtx`] if the instruction is supported, or an [`InsnError`] otherwise. - pub fn decode(&self, mctx: &dyn InsnMachineCtx) -> Result<DecodedInsnCtx, InsnError> { + pub fn decode<I: InsnMachineCtx>(&self, mctx: &I) -> Result<DecodedInsnCtx, InsnError> { DecodedInsnCtx::new(&self.0, mctx) } } ```
svsm
github_2023
others
358
coconut-svsm
00xc
@@ -0,0 +1,746 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2024 Intel Corporation. +// +// Author: Chuanxiao Dong <chuanxiao.dong@intel.com> +// +// The instruction decoding is implemented by refering instr_emul.c +// from the Arcn project, with some modifications. A copy of license +// is included below: +// +// Copyright (c) 2012 Sandvine, Inc. +// Copyright (c) 2012 NetApp, Inc. +// Copyright (c) 2017-2022 Intel Corporation. +// +// Aedistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. 
+// +// $FreeBSD$ +// +// The original file can be found in this repository: +// https://github.com/projectacrn/acrn-hypervisor/blob/master/hypervisor/ +// arch/x86/guest/instr_emul.c + +use super::insn::{DecodedInsn, Immediate, Operand, MAX_INSN_SIZE}; +use super::opcode::{OpCodeClass, OpCodeDesc, OpCodeFlags}; +use super::{InsnError, Register, SegRegister}; +use crate::cpu::control_regs::{CR0Flags, CR4Flags}; +use crate::cpu::efer::EFERFlags; +use crate::cpu::registers::SegDescAttrFlags; +use crate::types::Bytes; +use bitflags::bitflags; + +/// Undecoded instruction bytes +#[derive(Default, Debug, Copy, Clone, PartialEq)] +pub struct InsnBytes { + /// Raw instruction bytes + bytes: [u8; MAX_INSN_SIZE], + /// Number of instruction bytes being processed + nr_processed: usize, +} + +impl InsnBytes { + pub const fn new(bytes: [u8; MAX_INSN_SIZE]) -> Self { + Self { + bytes, + nr_processed: 0, + } + } + + /// Get a single unprocessed instruction byte. + /// + /// # Returns + /// + /// An instruction byte if success or an [`InsnError`] otherwise + pub fn peek(&self) -> Result<u8, InsnError> { + if self.nr_processed < self.bytes.len() { + Ok(self.bytes[self.nr_processed]) + } else { + Err(InsnError::InsnPeek) + }
Style nitpick: ```suggestion self.bytes .get(self.nr_processed) .copied() .ok_or(InsnError::InsnPeek) ```
svsm
github_2023
others
338
coconut-svsm
00xc
@@ -94,6 +99,20 @@ pub fn early_idt_init() { pub fn idt_init() { // Set IST vectors init_ist_vectors(); + + // Capture an address that can be used by assembly code to read the #HV + // doorbell page. The address of each CPU's doorbell page may be + // different, but the address of the field in the PerCpu structure that + // holds the actual pointer is constant across all CPUs, so that is the + // pointer that is actually captured. The address that is captured is + // stored as a usize instead of a typed value, because the declarations + // required for type safety here are cumbersome, and the assembly code + // that uses the value is not type safe in any case, so enforcing type + // safety on the pointer would offer no meaningful value. + unsafe { + let global_ref = ptr::addr_of!(HV_DOORBELL_ADDR) as usize; + *(global_ref as *mut usize) = (*this_cpu_unsafe()).hv_doorbell_addr(); + }
We can just do this if we set `HV_DOORBELL_ADDR` to be `static mut`. ```suggestion unsafe { let global_ref = ptr::addr_of_mut!(HV_DOORBELL_ADDR); *global_ref = (*this_cpu_unsafe()).hv_doorbell_addr(); } ```
svsm
github_2023
others
338
coconut-svsm
cxdong
@@ -25,51 +31,183 @@ pushq %r15 .endm -.macro pop_regs - popq %r15 - popq %r14 - popq %r13 - popq %r12 - popq %r11 - popq %r10 - popq %r9 - popq %r8 - popq %rbp - popq %rdi - popq %rsi - popq %rdx - popq %rcx - popq %rbx - popq %rax - - addq $16, %rsp /* Skip vector and error code */ -.endm - .macro default_entry_no_ist name: req handler:req error_code:req vector:req .globl asm_entry_\name asm_entry_\name: .if \error_code == 0 pushq $0 .endif - pushq $\vector push_regs + movl $\vector, %esi movq %rsp, %rdi call ex_handler_\handler jmp default_return .endm +.macro isr_entry_no_ist vector:req + .globl asm_entry_irq_\vector +asm_entry_irq_\vector:
I was going through the default_return, and it seems to me that a "push $0" may be needed here (representing an error code). I might be missing something, please correct me if my understanding is wrong. "push_regs" pushes 15 registers, but default_return checks CS from 17*8(rsp) and "begin_iret_return" increases the rsp by 16*8, which seems both assume this macro should push one more 64bit value to the stack?
svsm
github_2023
others
338
coconut-svsm
cxdong
@@ -445,6 +458,32 @@ impl PerCpu { } } + pub fn setup_hv_doorbell(&self) -> Result<(), SvsmError> { + let paddr = allocate_zeroed_page()?; + let ghcb = &mut current_ghcb(); + if let Err(e) = HVDoorbell::init(paddr, ghcb) { + free_page(paddr); + return Err(e); + } + + unsafe { + let cpu_unsafe = self.cpu_unsafe as *mut PerCpuUnsafe; + (*cpu_unsafe).hv_doorbell = paddr.as_mut_ptr::<HVDoorbell>(); + } + + Ok(()) + } + + pub fn configure_hv_doorbell(&self) -> Result<(), SvsmError> {
Wondering if this is something feasible to be implemented in the platform abstraction layer?
svsm
github_2023
others
338
coconut-svsm
00xc
@@ -94,6 +99,20 @@ pub fn early_idt_init() { pub fn idt_init() { // Set IST vectors init_ist_vectors(); + + // Capture an address that can be used by assembly code to read the #HV + // doorbell page. The address of each CPU's doorbell page may be + // different, but the address of the field in the PerCpu structure that + // holds the actual pointer is constant across all CPUs, so that is the + // pointer that is actually captured. The address that is captured is + // stored as a usize instead of a typed value, because the declarations + // required for type safety here are cumbersome, and the assembly code + // that uses the value is not type safe in any case, so enforcing type + // safety on the pointer would offer no meaningful value. + unsafe { + let global_ref = ptr::addr_of_mut!(HV_DOORBELL_ADDR); + *global_ref = (*this_cpu_unsafe()).hv_doorbell_addr();
Now that I think about it, is there any reason why we're not just doing this? ```rust HV_DOORBELL_ADDR = (*this_cpu_unsafe()).hv_doorbell_addr(); ``` For some reason I thought `HV_DOORBELL_ADDR` was an extern symbol, but it just being a global we can just directly assign.
svsm
github_2023
others
338
coconut-svsm
roy-hopkins
@@ -33,4 +44,46 @@ impl HVDoorbell { Ok(()) } + + pub fn process_pending_events(&self) { + // Clear the NoFurtherSignal bit before processing. If any additional + // signal comes in after processing has commenced, it may be missed by + // this loop, but it will be detected when interrupts are processed + // again. Also clear the NMI bit, since NMIs are not expected. + let no_further_signal_mask: u8 = HVDoorbellFlags::new() + .with_no_further_signal(true) + .with_nmi_pending(true) + .into(); + let flags = HVDoorbellFlags::from( + self.flags + .fetch_and(!no_further_signal_mask, Ordering::Relaxed), + ); + + // #MC handling is not possible, so panic if a machine check has + // occurred. + if flags.mc_pending() { + panic!("#MC exception delivered via #HV"); + } + + // Consume interrupts as long as they are available. + loop { + // Consume any interrupt that may be present. + let vector = self.vector.swap(0, Ordering::Relaxed); + if vector == 0 { + break; + } + common_isr_handler(vector as usize);
Reading through the GHCB specification and particularly the host pseudocode in section 5.5.1, it looks like this loop may not work as expected. Firstly, `no_further_signal` is cleared above before reading the vector and exchanging it with zero. This means that if a further NMI is pending then another #HV can be issued which can cause this code to re-enter. I'm not sure if that's a problem or not. Secondly, if I understand this correctly, as soon as the vector is swapped with 0 then the next vector that the host will populate in the doorbell page will result in a new #HV exception as both `PendingEvent.Vector==0` and `PendingEvent.NoFurtherSignal==0`. This will mean that it will be handled in a re-entered #HV handler meaning that the second iteration of this loop will always result in `vector == 0`. Finally, could the host potentially change the flags between reading them above and reading the vector? If so, should the vector and flags be read together atomically here and the flags checked again?
svsm
github_2023
others
354
coconut-svsm
vijaydhanraj
@@ -115,8 +116,12 @@ impl SvsmPlatform for SnpPlatform { } fn eoi(&self) { - // 0x80E is the X2APIC EOI MSR. - // Errors here cannot be handled but should not be grounds for panic. - let _ = current_ghcb().wrmsr(0x80E, 0); + // Issue an explicit EOI unless no explicit EOI is required. + if !current_hv_doorbell().no_eoi_required() { + // 0x80E is the X2APIC EOI MSR.
Both AMD and Intel SDM mention X2APIC EOI MSR address as `0x80B`. Is `0x80E` intentional?
svsm
github_2023
others
354
coconut-svsm
vijaydhanraj
@@ -75,6 +76,43 @@ impl HVDoorbell { common_isr_handler(vector as usize); } } + + pub fn no_eoi_required(&self) -> bool { + // Check to see if the "no EOI required" flag is set to determine + // whether an explicit EOI can be avoided. + let mut no_eoi_required = self.no_eoi_required.load(Ordering::Relaxed); + loop { + // If the flag is not set, then an explicit EOI is required. + if (no_eoi_required & 1) == 0 { + return false; + } + // Attempt to atomically clear the flag. + match self.no_eoi_required.compare_exchange_weak( + no_eoi_required, + no_eoi_required & !1, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(new) => no_eoi_required = new, + } + } + + // If the flag was successfully cleareed, then no explicit EOI is
cleareed->cleared?
svsm
github_2023
others
354
coconut-svsm
vijaydhanraj
@@ -115,8 +116,12 @@ impl SvsmPlatform for SnpPlatform { } fn eoi(&self) { - // 0x80E is the X2APIC EOI MSR. - // Errors here cannot be handled but should not be grounds for panic. - let _ = current_ghcb().wrmsr(0x80E, 0); + // Issue an explicit EOI unless no explicit EOI is required. + if !current_hv_doorbell().no_eoi_required() { + // 0x80E is the X2APIC EOI MSR. + // Errors here cannot be handled but should not be grounds for + // panic. + let _ = current_ghcb().wrmsr(0x80E, 0);
Currently, code base has different implementations of write MSR like `write_msr()` and `wrmsr()`. Would implementing `wrmsr` as a platform abstracted trait function be better?
svsm
github_2023
others
342
coconut-svsm
stefano-garzarella
@@ -1,8 +1,10 @@ FEATURES ?= "default" SVSM_ARGS = --features ${FEATURES} -FEATURES_TEST ?= "default-test" -SVSM_ARGS_TEST = --no-default-features --features ${FEATURES_TEST} +SVSM_ARGS_TEST = --no-default-features +ifdef FEATURES_TEST + SVSM_ARGS_TEST := ${SVSM_ARGS_TEST} --features ${FEATURES_TEST}
What about using `+=` to concatenate the new part? ```suggestion SVSM_ARGS_TEST += --features ${FEATURES_TEST} ```
svsm
github_2023
others
272
coconut-svsm
stefano-garzarella
@@ -214,23 +242,28 @@ pub enum Mapping<'a> { Level0(&'a mut PTEntry), } +/// Page table structure containing a root page with multiple entries. #[repr(C)] #[derive(Default, Debug)] pub struct PageTable { root: PTPage, } impl PageTable { + /// Load the current page table into the CR3 register. pub fn load(&self) { write_cr3(self.cr3_value()); } + /// Get the CR3 register value for the current page table. pub fn cr3_value(&self) -> PhysAddr { let pgtable = VirtAddr::from(self as *const PageTable); let cr3 = virt_to_phys(pgtable); set_c_bit(cr3) } + /// Clone the shared part of the page table; excluding the private + /// parts.
```suggestion /// parts. /// /// # Errors /// Returns `SvsmError` if the page cannot be allocated. /// ```
svsm
github_2023
others
272
coconut-svsm
stefano-garzarella
@@ -296,6 +365,14 @@ impl PageTable { }; } + /// Walks the page table to find a mapping for a given virtual address. + /// + /// # Parameters
```suggestion /// # Parameters /// - `page`: A mutable reference to the root page table. ```
svsm
github_2023
others
272
coconut-svsm
stefano-garzarella
@@ -268,12 +326,22 @@ impl PageTable { Some(unsafe { &mut *address.as_mut_ptr::<PTPage>() }) } + /// Walks a page table at level 0 to find a mapping. + /// + /// # Parameters + /// - `page`: A mutable reference to the root page table. + /// - `vaddr`: The virtual address to find a mapping for. + /// + /// # Returns + /// A `Mapping` representing the found mapping. + /// fn walk_addr_lvl0(page: &mut PTPage, vaddr: VirtAddr) -> Mapping<'_> { let idx = PageTable::index::<0>(vaddr); Mapping::Level0(&mut page[idx]) } + /// Walks a page table at level 1 to find a mapping.
Should we add `# Parameters` and `# Returns` section also here, or is it redundant? Ditto for `walk_addr_lvl2`
svsm
github_2023
others
272
coconut-svsm
stefano-garzarella
@@ -307,6 +384,7 @@ impl PageTable { }; } + /// Walk the virtual address and return the corresponding mapping.
```suggestion /// Walk the virtual address and return the corresponding mapping. /// /// # Parameters /// - `vaddr`: The virtual address to find a mapping for. /// /// # Returns /// A `Mapping` representing the found mapping. /// ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -192,6 +217,7 @@ impl Default for PTPage { } } +/// Can be used to access page table entries by index. impl Index<usize> for PTPage { type Output = PTEntry;
This is a self-explanatory trait implementation, so I'd remove this.
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -200,12 +226,14 @@ impl Index<usize> for PTPage { } } +/// Can be used to modify page table entries by index. impl IndexMut<usize> for PTPage { fn index_mut(&mut self, index: usize) -> &mut PTEntry {
Same thing, I'd say this is self-explanatory.
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -245,19 +282,44 @@ impl PageTable { }) } + /// Copy an entry `entry` from another [`PageTable`].
```suggestion /// Copy an entry at index `entry` from another [`PageTable`]. ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -245,19 +282,44 @@ impl PageTable { }) } + /// Copy an entry `entry` from another [`PageTable`]. pub fn copy_entry(&mut self, other: &PageTable, entry: usize) { self.root.entries[entry] = other.root.entries[entry]; } + /// Allocates a zeroed page table and returns a mutable pointer to it. + /// + /// # Errors + /// Returns `SvsmError` if the page cannot be allocated. + /// fn allocate_page_table() -> Result<*mut PTPage, SvsmError> { let ptr = allocate_zeroed_page()?; Ok(ptr.as_mut_ptr::<PTPage>()) } + /// Computes the index within a page table at the given level for a + /// virtual address `vaddr`.
```suggestion /// Computes the index within a page table at level `L` for a /// virtual address `vaddr`. ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -431,6 +548,17 @@ impl PageTable { Ok(()) } + /// Splits a page into 4KB pages if it is part of a larger mapping. + /// + /// # Parameters + /// - `mapping`: The mapping to split. + /// + /// # Returns + /// A result indicating success or an error code. + /// + /// # Errors + /// Returns `SvsmError` if the operation fails. + ///
This function signature is pretty self-explanatory I'd say. ```suggestion /// Attempts to split a larger mapping into page-sized mappings. ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -456,6 +584,17 @@ impl PageTable { entry.set(set_c_bit(addr), flags); } + /// Sets the shared state for a 4KB page. + /// + /// # Parameters + /// - `vaddr`: The virtual address of the page. + /// + /// # Returns + /// A result indicating success or an error code. + /// + /// # Errors + /// Returns `SvsmError` if the operation fails. + /// pub fn set_shared_4k(&mut self, vaddr: VirtAddr) -> Result<(), SvsmError> {
This is too verbose as well. ```suggestion /// Attempts to clear the C-bit for the page at `vaddr`. ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -480,6 +630,14 @@ impl PageTable { } } + /// Checks the mapping of a given virtual address. + /// + /// # Parameters + /// - `vaddr`: The virtual address to check. + /// + /// # Returns + /// An option containing the physical address if a mapping exists, or `None`. + ///
```suggestion /// Gets the physical address of for a mapped `vaddr`, or `None` if /// no such mapping exists. ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -865,6 +1238,13 @@ impl RawPageTablePart { } } + /// Unmaps a 2MB page. + /// + /// # Parameters + /// - `vaddr`: The virtual address of the mapping to unmap. + /// + /// # Returns + /// An optional [`PTEntry`] representing the unmapped page table entry. pub fn unmap_2m(&mut self, vaddr: VirtAddr) -> Option<PTEntry> { assert!(vaddr.is_aligned(PAGE_SIZE_2M));
Missing `# Panics` section.
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -844,6 +1207,16 @@ impl RawPageTablePart { } } + /// Maps a 2MB page. + /// + /// # Parameters + /// - `vaddr`: The virtual address to map. + /// - `paddr`: The physical address to map to. + /// - `flags`: The flags to apply to the mapping. + /// - `shared`: Indicates whether the mapping is shared + /// + /// # Returns + /// A result indicating success (`Ok`) or failure (`Err`). pub fn map_2m(
Missing `# Panics` section.
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -767,18 +1083,40 @@ impl RawPageTablePart { } } + /// Frees the resources associated with this page table part. fn free(&self) { RawPageTablePart::free_lvl2(&self.page); } + /// Gets the physical address `PhysAddr` of this page table part. + /// + /// # Returns + /// The `PhysAddr` of this page table part. fn address(&self) -> PhysAddr { virt_to_phys(VirtAddr::from(self as *const RawPageTablePart)) } + /// Walks the page table to find the mapping for a given virtual address. + /// + /// # Parameters + /// - `vaddr`: The virtual address to find the mapping for. + /// + /// # Returns + /// The [`Mapping`] for the given virtual address. fn walk_addr(&mut self, vaddr: VirtAddr) -> Mapping<'_> { PageTable::walk_addr_lvl2(&mut self.page, vaddr) } + /// Allocates a 4KB page table entry for a given virtual address. + /// + /// # Parameters + /// - `vaddr`: The virtual address for which to allocate the PTE. + /// + /// # Returns + /// The [`Mapping`] representing the allocated or existing PTE for the address. + /// + /// # Panics + /// Panics if a level 3 mapping is attempted in a [`RawPageTablePart`].
```suggestion /// # Panics /// Panics if `vaddr` corresponds to a level 3 mapping. ```
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -756,6 +1068,10 @@ impl RawPageTablePart { } } + /// Frees a level 2 page table, including all level 1 tables beneath it. + /// + /// # Parameters + /// - `page`: A reference to the level 2 page table to be freed. fn free_lvl2(page: &PTPage) {
I'd say `# Parameters` is overexplaining here.
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -746,6 +1054,10 @@ impl RawPageTablePart { Some(unsafe { &mut *address.as_mut_ptr::<PTPage>() }) } + /// Frees a level 1 page table. + /// + /// # Parameters + /// - `page`: A reference to the level 1 page table to be freed.
I'd say `# Parameters` is overexplaining here.
svsm
github_2023
others
272
coconut-svsm
00xc
@@ -677,32 +930,65 @@ impl PageTable { static INIT_PGTABLE: SpinLock<PageTableRef> = SpinLock::new(PageTableRef::unset()); +/// Sets the initial page table unless it is already set. +/// +/// # Parameters +/// - `pgtable`: The page table reference to set as the initial page table. +/// +/// # Panics +/// Panics if the initial page table is already set. +/// pub fn set_init_pgtable(pgtable: PageTableRef) { let mut init_pgtable = INIT_PGTABLE.lock(); assert!(!init_pgtable.is_set()); *init_pgtable = pgtable; } +/// Acquires a lock and returns a guard for the initial page table, which +/// is locked for the duration of the guard's scope. +/// +/// # Returns +/// A `LockGuard` for the initial page table. +/// pub fn get_init_pgtable_locked<'a>() -> LockGuard<'a, PageTableRef> { INIT_PGTABLE.lock() } +/// A reference wrapper for a [`PageTable`]. #[derive(Debug)] pub struct PageTableRef { pgtable_ptr: *mut PageTable, } impl PageTableRef { + /// Creates a new [`PageTableRef`]. + /// + /// # Parameters + /// - `pgtable_ptr`: A raw pointer to a [`PageTable`]. + /// + /// # Returns + /// A new [`PageTableRef`]. + /// pub fn new(pgtable_ptr: *mut PageTable) -> PageTableRef { Self { pgtable_ptr } } + /// Creates an unset [`PageTableRef`], i.e. a NULL pointer. + /// + /// # Returns + /// An unset [`PageTableRef`]. + /// pub const fn unset() -> PageTableRef { PageTableRef { pgtable_ptr: ptr::null_mut(), } } + /// Checks if the [`PageTableRef`] is set, i.e. not NULL. + /// + /// # Returns + /// `true` if the [`PageTableRef`] is set, otherwise `false`. + /// fn is_set(&self) -> bool {
The function signature here is pretty self-explanatory, I'd remove the `# Returns`.