repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
cheriot-rtos
github_2023
others
363
CHERIoT-Platform
rmn30
@@ -812,6 +812,15 @@ namespace CHERI return permission_set_from_pointer(ptr); } + /** + * Drop some permissions from this capability
Is it worth extending this comment to say that you must use this if dropping global from a sealed cap and that it may produce very slightly smaller code?
cheriot-rtos
github_2023
cpp
363
CHERIoT-Platform
rmn30
@@ -240,13 +240,55 @@ namespace } } // namespace +void check_sealed_scoping() +{ + Capability<void> o{switcher_current_thread()}; + TEST(o.is_valid() && (o.type() == CheriSealTypeSealedTrustedStacks), + "Shared object cap not as expected: {}", + o); + + // Take the address of the o cap, requiring that it go out to memory. + Capability<Capability<void>> oP{&o}; + + /* + * Load a copy of our sealed o cap through an authority that lacks + * LoadGlobal permission. The result should be identical to the original + * but without global permission. + */ + Capability<Capability<void>> oPNoLoadGlobal = oP; + oPNoLoadGlobal.drop_permissions(Permission::LoadGlobal); + Capability<void> oLocal1 = *oPNoLoadGlobal; + + TEST(oLocal1.is_valid(), + "Loading global sealed cap through non-LoadGlobal invalid"); + TEST_EQUAL(oLocal1.type(), + o.type(), + "Loading global sealed cap through non-LoadGlobal bad type"); + TEST_EQUAL(static_cast<PermissionSet>(oLocal1.permissions()),
I had to add this cast in the same situation recently and it is slightly annoying. Could we have an implicit conversion or is that a bad idea? You can also get rid of it if you make `oLocal1` `const` as there is a `const` version of the `permissions` method that returns a `PermissionSet`.
cheriot-rtos
github_2023
others
363
CHERIoT-Platform
davidchisnall
@@ -812,6 +812,20 @@ namespace CHERI return permission_set_from_pointer(ptr); } + /** + * Drop some permissions from this capability. + * + * Because this function computes the permission mask by clearing bits + * in the PermissionSet::omnipotent() all-ones mask, rather than from + * the set of permissions currently held by this Capability, it is safe + * to use to clear Global permission on a sealed capability. + */ + template<std::same_as<Permission>... Permissions> + void drop_permissions(Permissions... drop)
We already have a without method on PermissionSet, please can you make the naming of this consistent?
cheriot-rtos
github_2023
others
364
CHERIoT-Platform
nwf
@@ -2,7 +2,7 @@ "devices": { "shadow" : { "start" : 0x30000000, - "end" : 0x30004000 + "length": 0x30000800
That seems wrong?
cheriot-rtos
github_2023
others
364
CHERIoT-Platform
nwf
@@ -47,17 +47,17 @@ }, "instruction_memory": { "start": 0x00101000, - "end": 0x00140000 + "end": 0x00120000 }, "heap": { - "end": 0x00140000 + "end": 0x00120000 }, "revokable_memory_start": 0x00100000, "defines" : [ "IBEX", "SUNBURST", "SUNBURST_SHADOW_BASE=0x30000000", - "SUNBURST_SHADOW_SIZE=0x4000", + "SUNBURST_SHADOW_SIZE=0x800",
Can this not be computed from the `["devices"]["shadow"]["length"]` value somehow?
cheriot-rtos
github_2023
others
364
CHERIoT-Platform
nwf
@@ -0,0 +1,52 @@ +Allocating compartment: ----- Simple Case ----- +Allocating compartment: Allocated: 0x109910 (v:1 0x109910-0x109940 l:0x30 o:0x0 p: G RWcgm- -- ---)
Any example that includes addresses is specific to a particular board and its memory map... should we include the board in the directory path?
cheriot-rtos
github_2023
others
364
CHERIoT-Platform
nwf
@@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +if [ -z "${SONATA_SIMULATOR}" ] ; then + SONATA_SIMULATOR=/cheriot-tools/bin/sonata_simulator +fi
Purely stylistically, these can all be, for example, ``` : ${SONATA_SIMULATOR:=/cheriot-tools/bin/sonata_simulator} ```
cheriot-rtos
github_2023
others
364
CHERIoT-Platform
nwf
@@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +if [ -z "${SONATA_SIMULATOR}" ] ; then + SONATA_SIMULATOR=/cheriot-tools/bin/sonata_simulator +fi + +if [ -z "${SONATA_SIMULATOR_BOOT_STUB}" ] ; then + SONATA_SIMULATOR_BOOT_STUB=/cheriot-tools/elf/sonata_simulator_boot_stub +fi + +if [ -z "$1" ] ; then + echo You must specify an elf file to run. + exit 1 +fi + +if [ ! -x "${SONATA_SIMULATOR}" ] ; then + echo Unable to locate Sonata simulator, please set SONATA_SIMULATOR to the full path of the simulator. + exit 2 +fi + +if [ ! -x "${SONATA_SIMULATOR_BOOT_STUB}" ] ; then + echo Unable to locate Sonata simulator boot stub, please set SONATA_SIMULATOR_BOOT_STUB to the full path of the boot stub. + exit 3 +fi + +# Remove old uart log +UART_LOG=uart0.log
Possibly make this also parametric as the above paths?
cheriot-rtos
github_2023
others
364
CHERIoT-Platform
nwf
@@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +if [ -z "${SONATA_SIMULATOR}" ] ; then + SONATA_SIMULATOR=/cheriot-tools/bin/sonata_simulator +fi + +if [ -z "${SONATA_SIMULATOR_BOOT_STUB}" ] ; then + SONATA_SIMULATOR_BOOT_STUB=/cheriot-tools/elf/sonata_simulator_boot_stub +fi + +if [ -z "$1" ] ; then + echo You must specify an elf file to run. + exit 1 +fi + +if [ ! -x "${SONATA_SIMULATOR}" ] ; then + echo Unable to locate Sonata simulator, please set SONATA_SIMULATOR to the full path of the simulator. + exit 2 +fi + +if [ ! -x "${SONATA_SIMULATOR_BOOT_STUB}" ] ; then + echo Unable to locate Sonata simulator boot stub, please set SONATA_SIMULATOR_BOOT_STUB to the full path of the boot stub. + exit 3 +fi + +# Remove old uart log +UART_LOG=uart0.log +rm -f ${UART_LOG} + +# If a second argument is provided, check content of UART log.
I kinda wish this were an `expect` script, but I guess this is fine too. :)
cheriot-rtos
github_2023
cpp
373
CHERIoT-Platform
nwf
@@ -923,17 +923,22 @@ __cheriot_minimum_stack(0x1c0) ssize_t return 0; } -__cheriot_minimum_stack(0xf0) int heap_can_free(SObj heapCapability, - void *rawPointer) +__cheriot_minimum_stack(0x260) int heap_can_free(SObj heapCapability, + void *rawPointer) { - STACK_CHECK(0xf0); + // This function requires much less space, but we claim that we require as + // much as `heap_free` so that a call to `heap_free` will not fail due to + // insufficient stack immediately after `heap_can_Free` has said that it's
```suggestion // insufficient stack immediately after `heap_can_free` has said that it's ```
cheriot-rtos
github_2023
others
361
CHERIoT-Platform
davidchisnall
@@ -46,23 +30,45 @@ struct SonataPulseWidthModulation * as only an 8 bit counter is being used. */ uint32_t period; - } outputs[OutputCount]; - /* - * Sets the output of a specified pulse-width modulated output. - * - * The first argument is the index of the output. The second argument is - * the period (length) of the output wave represented as a counter of - * system clock cycles. The third argument is the number of clock cycles - * for which a high pulse is sent within that period. - * - * So for example `output_set(0, 200, 31)` should set a 15.5% output. - */ - void output_set(uint32_t index, uint8_t period, uint8_t dutyCycle) volatile + /* + * Sets the output of a specified pulse-width modulated output. + * + * @param period The length of the output wave represented as a counter + * of system clock cycles. + * @param dutyCycle The number of clock cycles for which a high pulse is + * sent within that period. + * + * So for example `output_set(0, 200, 31)` should set a 15.5% output. + * For a constant high output (100% duty cycle), set the dutyCycle > + * period. + */ + void output_set(uint8_t period, uint8_t dutyCycle) volatile + { + this->period = period; + this->dutyCycle = dutyCycle; + } + }; + + /// A convenience structure that can map onto multiple PWM outputs. + template<size_t NumberOfPwms = 6> + struct Array
Is there a reason that this isn't a `std::array`? It looks like it's equivalent to a subset of `std::array`.
cheriot-rtos
github_2023
others
361
CHERIoT-Platform
davidchisnall
@@ -1,37 +1,21 @@ #pragma once +#include <array>
I think this is stale now?
cheriot-rtos
github_2023
others
361
CHERIoT-Platform
davidchisnall
@@ -46,23 +30,45 @@ struct SonataPulseWidthModulation * as only an 8 bit counter is being used. */ uint32_t period; - } outputs[OutputCount]; - /* - * Sets the output of a specified pulse-width modulated output. - * - * The first argument is the index of the output. The second argument is - * the period (length) of the output wave represented as a counter of - * system clock cycles. The third argument is the number of clock cycles - * for which a high pulse is sent within that period. - * - * So for example `output_set(0, 200, 31)` should set a 15.5% output. - */ - void output_set(uint32_t index, uint8_t period, uint8_t dutyCycle) volatile + /* + * Sets the output of a specified pulse-width modulated output. + * + * @param period The length of the output wave represented as a counter + * of system clock cycles. + * @param dutyCycle The number of clock cycles for which a high pulse is + * sent within that period. + * + * So for example `output_set(0, 200, 31)` should set a 15.5% output. + * For a constant high output (100% duty cycle), set the dutyCycle > + * period. + */ + void output_set(uint8_t period, uint8_t dutyCycle) volatile + { + this->period = period; + this->dutyCycle = dutyCycle; + } + }; + + /// A convenience structure that can map onto multiple PWM outputs. + template<size_t NumberOfPwms = 6> + struct Array { - Debug::Assert(index < OutputCount, "Specified PWM is out of range"); - Debug::Assert(dutyCycle <= period, "Duty cycle cannot exceed 100%"); - outputs[index].period = period; - outputs[index].dutyCycle = dutyCycle; - } -}; + Output output[NumberOfPwms]; + + template<size_t Index> + volatile Output *get() volatile + { + static_assert(Index < NumberOfPwms, "PWM index out of bounds"); + return output + Index; + } + }; + + /** + * There are six general purpose PWM outputs are general purpose that can be + * pinmuxed to different outputs. + */ + using General = Array<6>; + /// There is one dedicated PWM for the LCD backlight. + using Lcd = Output;
Maybe call this `LcdBacklight`? It's a bit confusing to have a thing called Lcd that isn't the LCD interface.
cheriot-rtos
github_2023
others
358
CHERIoT-Platform
rmn30
@@ -1659,6 +1659,7 @@ exception_entry_asm: * mtdc: pointer to TrustedStack */ csrw mcause, MCAUSE_THREAD_EXIT + csrw mtval, MCAUSE_THREAD_EXIT
Worth a comment here to indicate that this avoids a small info leak to the scheduler?
cheriot-rtos
github_2023
others
358
CHERIoT-Platform
rmn30
@@ -274,21 +274,19 @@ __Z26compartment_switcher_entryz: * and to be run with interrupts deferred, we'd like the switcher, and * especially its stack-zeroing, to be preemtable. */ - cincoffset ct2, csp, -SPILL_SLOT_SIZE .Lswitch_entry_first_spill: /* * FROM: above * ITO: .Lswitch_just_return (via .Lhandle_error_in_switcher) */ - csc cs0, SPILL_SLOT_cs0(ct2) - csc cs1, SPILL_SLOT_cs1(ct2) - csc cgp, SPILL_SLOT_cgp(ct2) - csc cra, SPILL_SLOT_pcc(ct2) - cmove csp, ct2 + csc cs0, (SPILL_SLOT_cs0-SPILL_SLOT_SIZE)(csp) + csc cs1, (SPILL_SLOT_cs1-SPILL_SLOT_SIZE)(csp) + csc cgp, (SPILL_SLOT_cgp-SPILL_SLOT_SIZE)(csp) + csc cra, (SPILL_SLOT_pcc-SPILL_SLOT_SIZE)(csp)
Unfortunately the negative offset means we don't get to use compressed `c.scsp` instruction here. `ct2` is also not accessible to the compressed `c.csc` so the instruction saved is still a net win. Remind me why we can't do the `cincoffset` before into `sp`? So that we can recover `csp` on entry in the case where the cincoffset takes it below `csp.base`?
cheriot-rtos
github_2023
others
358
CHERIoT-Platform
rmn30
@@ -1021,6 +1050,14 @@ exception_entry_asm: csetaddr csp, csp, gp cunseal csp, ca0, csp // Atlas update: sp: unsealed target thread trusted stack pointer + /* + * LOCAL SEAL: if the scheduler has shed GL(obal) of the reference it gave + * us in a0, then sp will also lack GL(obal) after unsealing. This + * reference is not stored in memory (in the switcher, anyway), just mtdc. + * However, when this reference is extracted and sealed for the next + * context switch (in .Lexception_scheduler_call), the result will lack + * GL(obal), which will likely prove challenging for the scheduler.
> which will likely prove challenging for the scheduler I think this is an understatement?! I'm not so familiar with the scheduler but presumably it keeps the sealed trusted stack pointers in global data structures. It sounds like this would only allow the scheduler to crash itself so not really an issue but I wonder if there are other ways a non-GL context pointer could make its way into the scheduler? `switcher_interrupt_thread` looks like it needs investigating.
cheriot-rtos
github_2023
others
362
CHERIoT-Platform
rmn30
@@ -127,8 +115,12 @@ _Z16token_obj_unsealP10SKeyStructP10SObjStruct: .hidden _Z23token_obj_unseal_staticP10SKeyStructP10SObjStruct .globl _Z23token_obj_unseal_staticP10SKeyStructP10SObjStruct _Z23token_obj_unseal_staticP10SKeyStructP10SObjStruct: - li a2, CheriSealTypeStaticToken - j .Ltoken_unseal_internal + auipcc ca2, %cheriot_compartment_hi(__sealingkey) + clc ca2, %cheriot_compartment_lo_i(_Z23token_obj_unseal_staticP10SKeyStructP10SObjStruct)(ca2) + li t0, CheriSealTypeStaticToken + csetaddr ca2, ca2, t0 + csetbounds ca2, ca2, 1
I guess we could avoid these extra instructions by having three versions of `__sealingKey` in memory and loading the appropriate one? I think that would be a net win on space and cycles?
cheriot-rtos
github_2023
cpp
362
CHERIoT-Platform
rmn30
@@ -1267,15 +1267,31 @@ extern "C" SchedulerEntryInfo loader_entry_point(const ImgHdr &imgHdr, sizeof(void *), PermissionSet{Permission::Global, Permission::Unseal}); - setSealingKey(imgHdr.allocator(), Allocator); + /* + * The token library unseals both static and dynamic objects, sometimes + * either and sometimes with static knowledge of which is expected. To + * avoid `li; csetaddr; csetbounds` sequences, we give it a separate cap + * for each case. + */ setSealingKey(imgHdr.token_library(), Allocator, 2, // Allocator and StaticToken
Could we have a `static_assert` here that `StaticToken == (Allocator + 1)`?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
rmn30
@@ -183,159 +188,266 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: + /* + * Atlas: + * mtdc: thread trusted stack pointer + * ra: caller return address (nominally; corruption is on them)
> (nominally; corruption is on them) I think that this is entered via an interrupt disabling sentry so we can be sure that `ra` is a return sentry to the caller. If we ever change that to interrupt inheriting we'd lose that guarantee but I agree it doesn't matter much.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
rmn30
@@ -183,159 +188,266 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: + /* + * Atlas: + * mtdc: thread trusted stack pointer + * ra: caller return address (nominally; corruption is on them) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t1: import table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5, t0: caller arguments to callee, passed or zered + * tp, t2: scratch + * + * Traps on an invalid csp or an overflow (even mid-way through this spill + * sequence) will trigger invocation of the compartment's error handler, or + * forcibly unwind if none. Nothing secret from the caller would be visible.
```suggestion * forcibly unwind if none. Nothing secret from the switcher would be visible. ```
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered
Specifically, this is the capability to arguments that don’t fit in registers (including variadic ones). Note: The caller is not required to zero this, we do, just like a0-a5.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling
Do we actually rely on this property? I would hope that without it we could accept any return sentry here.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill
This took a couple of attempts to parse. The caller is asserting that they don’t care about the values in caller-save registers. There is no obligation to spill them, they may simply be unused.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if
I don’t think that’s a safe assumption. What happens if: - I clear store global from CSP and don’t have local things in cs0? - I put a heap cap in CSP and free it in another thread? I think this can’t happen yet but will be possible once the switcher runs with interrupts enabled (or we do multicore). After more thinking, I think the code is right but the comment is misleading. If we've *accidentally* run out of stack space, we'll trap on the first instruction and so should gracefully return. If we've maliciously done bad things to csp before invoking the switcher, we should be force unwound.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind
Note that this check checks for store local which ensures that this is a stack and so later accesses cannot fault.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG
We might be able to remove this now. I think Ibex and Sail both log cspecialr, it was only Flute that didn't, so we needed to do a cmove to make it log the value.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack .
Maybe explicitly state that the trusted stack grows upwards (i.e. 0 is the initial trusted stack frame, 1 the first cross-compartment call, and so on).
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again)
We can probably skip this. We've spilled cs0 at this point, so we could use cs0 instead of t2 on lines 275 and 282 and therefore preserve the value of ct2.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller
Still? I guess there was a brief two-instruction period when it wasn't, but I wouldn't want to rely on that.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer
Clarify that this is the modified csp that includes the save area?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action.
Can you leave a note for formal verification: ideally, we'd prove that this store is dead and remove this instruction.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar
Specifically, the length of the new stack.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base
This doesn't appear to be used, can we view it as dead at this point?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. + */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */
```suggestion * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed */ ```
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key
It would be nice to have a different annotation for deltas to the atlas and complete atlases. It would also be nice if the delta atlases included registers that are now dead.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries
While you're making things symbolic constants...
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset.
Maybe say explicitly that it's populated by the caller and we've kept that register live throughout this?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry
Or an untagged capability if the caller is malicious.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault.
Maybe flag this as a proof obligation?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1)
I've lost track. The clobber of tp is the trusted stack frame? Can the comment say that explicitly? Terminology like 'drop the reference' is confusing if it means clobbering a register.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar
Isn't tp now the minimum stack size for the callee?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar
Isn't it the size of the stack?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz
And `.Lafter_zero`?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector
Vector?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra
Implicit clobber of cra. Not essential, because the value will be in pcc as well.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -713,77 +1181,147 @@ exception_entry_asm: cmove csp, ca0 .Linvoke_error_handler: - // Clear all registers except: - // cra is set by cjalr. csp and cgp are needed for the called compartment. - // ca0, used for the register state - // ca1, used for mcause - // ca2, used for mtval + /* + * FROM: above, .Lset_up_stack_handler_stackless + * LIVE IN: ra, sp, gp, a0, a1, a2 + * + * Atlas: + * ra: handler entrypoint (with bounds of compartment's .text) + * gp: target compartment cgp + * sp: target compartment invocation stack pointer + * a0, a1, a2: arguments to handler. + */ + /* + * For a stackful handler, the arguments are: + * - a0: equal to the invocation stack (sp), with a register spill frame + * here and above (the stack grows down!) + * - a1: mcause + * - a2: mtval + * + * While for stackless, the arguments are: + * - a0: mcause + * - a1: mtval + * - a2: zero + */ + + // Clear all other registers and invoke the handler zeroAllRegistersExcept ra, sp, gp, a0, a1, a2 - // Call the handler. cjalr cra +.Lhandler_return:
Why is there a label here? Just for documentation? (I accidentally saw this one first, because GitHub scrolled me near the end early on by accident, but I find this makes it hard to read throughout)
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed)
Might be worth having two kinds of dead to use consistently: - Dead, must not leak. - Dead, may leak but shouldn't. I'm not sure if it's worth this distinction though because the risk if we get it wrong is high, so it's probably better to just have a rule 'do not leak dead register values'.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such.
I find this somewhat misleading, because we have two notions of well-bracketed. We cannot assume intra-compartment well-bracketed control flow, but we can assume / enforce well-bracketed cross-compartment control flow. If something deeply nested in a compartment invokes this return sentry, that's fine, we pop the top trusted-stack frame.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return:
Why is this a label?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value.
This is reached *after* we've done the stack checks, so csp contains the store things?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. + */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp)
What happens if csp is write-only? We fault here and force unwind (I think we have a test for that, and I think it passes?)?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1
```suggestion // Zero everything else zeroAllRegistersExcept ra, sp, gp, s0, s1, a0 ``` NFC.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error
I don't think interrupts are either of those things?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, * + * + * Atlas: + * mtdc: either pointer to TrustedStack or zero + */ + /* + * We do not trust the interruptee's context. We cannot use its stack in any + * way. The save reg frame we can use is fetched from the tStack.
I know we still have the header with the silly name, but can we avoid tstack as a contraction?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, * + * + * Atlas: + * mtdc: either pointer to TrustedStack or zero + */ + /* + * We do not trust the interruptee's context. We cannot use its stack in any + * way. The save reg frame we can use is fetched from the tStack. + * In general, mtdc holds the trusted stack register. We are here with + * interrupts off and precious few registers available to us, so swap it + * with the csp (we'll put it back, later).
It would be nice if the atlases could capture this. We have a load of live-in mtdc, but it's almost an invariant that mtdc contains the trusted stack on every code path, except for a few where it contains the (untrusted) captured csp. That's possibly a better property to document than the fact that mtdc is live in.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, * + * + * Atlas: + * mtdc: either pointer to TrustedStack or zero + */ + /* + * We do not trust the interruptee's context. We cannot use its stack in any + * way. The save reg frame we can use is fetched from the tStack. + * In general, mtdc holds the trusted stack register. We are here with + * interrupts off and precious few registers available to us, so swap it + * with the csp (we'll put it back, later). + */ cspecialrw csp, mtdc, csp #ifndef NDEBUG // XXX: This move is useless, but just for debugging in the simulator. cmove csp, csp #endif
As previously, I think this can go.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, * + * + * Atlas: + * mtdc: either pointer to TrustedStack or zero + */ + /* + * We do not trust the interruptee's context. We cannot use its stack in any + * way. The save reg frame we can use is fetched from the tStack. + * In general, mtdc holds the trusted stack register. We are here with + * interrupts off and precious few registers available to us, so swap it + * with the csp (we'll put it back, later). + */ cspecialrw csp, mtdc, csp #ifndef NDEBUG // XXX: This move is useless, but just for debugging in the simulator. cmove csp, csp #endif - // If we read out zero, we've reentered the exception and are about to - // trap. 
Make sure that we end up in an architectural trap loop: clobber - // mtcc, so that trapping attempts to vector to an untagged PCC, thereby - // causing another (i.e., a third) trap in spillRegisters, below. - // - // While that's a good start, it does not guarantee that we end up in a - // trap loop: the reentry will probably have put something non-zero into - // mtdc, so we wouldn't hit this, and wouldn't loop, when we take that - // third trap. (Exactly what we'd do instead is hard to say; we'd try - // spilling registers to an attacker-controlled pointer, at the very - // least.) Therefore, clobber mtcc (!) to ensure that the certainly - // upcoming third trap puts us in an architectural trap loop. This is - // slightly preferable to clearing mtdc, which would also ensure that we - // looped, because the architectural loop is tighter and involves no - // program text, making it easier for microarchitecture to detect. + /*
Can you check the disassembly. In an older version, this kind of comment was breaking line numbers. I think it's fixed, but that's why I avoided multi-line comments originally.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, * + * + * Atlas: + * mtdc: either pointer to TrustedStack or zero + */ + /* + * We do not trust the interruptee's context. We cannot use its stack in any + * way. The save reg frame we can use is fetched from the tStack. + * In general, mtdc holds the trusted stack register. We are here with + * interrupts off and precious few registers available to us, so swap it + * with the csp (we'll put it back, later). + */ cspecialrw csp, mtdc, csp #ifndef NDEBUG // XXX: This move is useless, but just for debugging in the simulator. cmove csp, csp #endif - // If we read out zero, we've reentered the exception and are about to - // trap. 
Make sure that we end up in an architectural trap loop: clobber - // mtcc, so that trapping attempts to vector to an untagged PCC, thereby - // causing another (i.e., a third) trap in spillRegisters, below. - // - // While that's a good start, it does not guarantee that we end up in a - // trap loop: the reentry will probably have put something non-zero into - // mtdc, so we wouldn't hit this, and wouldn't loop, when we take that - // third trap. (Exactly what we'd do instead is hard to say; we'd try - // spilling registers to an attacker-controlled pointer, at the very - // least.) Therefore, clobber mtcc (!) to ensure that the certainly - // upcoming third trap puts us in an architectural trap loop. This is - // slightly preferable to clearing mtdc, which would also ensure that we - // looped, because the architectural loop is tighter and involves no - // program text, making it easier for microarchitecture to detect. + /* + * If we read out zero, we've reentered the exception and are about to trap + * (in spillRegisters, which uses sp as its authority). Make sure that we + * end up in an architectural trap loop: clobber mtcc, so that that trap + * attempts to vector to an untagged PCC, thereby causing another trap, + * which immediately traps, and so on. + * + * We could instead zero mtdc, ensuring that we spin through several + * instructions (taking a trap then each of cspecialrw, bnez, cspecialw, and + * then the traping csc), but this is less architecturally visible. + * + * Failure to do either would mean that the trap in spillRegisters below + * would re-enter the trap-handler with an unknown value (the first trap's + * sp) in mtdc, which the rest of this code would take to be a valid + * TrustedStack. Exactly what would happen then is hard to say; we'd try + * spilling registers to a potentially attacker-controlled pointer, at the + * very least, and that's something to avoid. + */ bnez sp, .Lexception_entry_still_alive cspecialw mtcc, csp
Can you document that this is writing 0 but we can't do `cspecialw mtcc, cnull` because `cspecialw` is actually an alias for `cspecialrw` and `cnull` as the source means 'don't write'? I'd prefer an explicit move from cnull here because the branch guarantees only that the address is 0. `csp` may still be tagged and executable, which can cause some fun (probably no issues in real hardware, a potential problem for formal verification).
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, * + * + * Atlas: + * mtdc: either pointer to TrustedStack or zero + */ + /* + * We do not trust the interruptee's context. We cannot use its stack in any + * way. The save reg frame we can use is fetched from the tStack. + * In general, mtdc holds the trusted stack register. We are here with + * interrupts off and precious few registers available to us, so swap it + * with the csp (we'll put it back, later). + */ cspecialrw csp, mtdc, csp #ifndef NDEBUG // XXX: This move is useless, but just for debugging in the simulator. cmove csp, csp #endif - // If we read out zero, we've reentered the exception and are about to - // trap. 
Make sure that we end up in an architectural trap loop: clobber - // mtcc, so that trapping attempts to vector to an untagged PCC, thereby - // causing another (i.e., a third) trap in spillRegisters, below. - // - // While that's a good start, it does not guarantee that we end up in a - // trap loop: the reentry will probably have put something non-zero into - // mtdc, so we wouldn't hit this, and wouldn't loop, when we take that - // third trap. (Exactly what we'd do instead is hard to say; we'd try - // spilling registers to an attacker-controlled pointer, at the very - // least.) Therefore, clobber mtcc (!) to ensure that the certainly - // upcoming third trap puts us in an architectural trap loop. This is - // slightly preferable to clearing mtdc, which would also ensure that we - // looped, because the architectural loop is tighter and involves no - // program text, making it easier for microarchitecture to detect. + /* + * If we read out zero, we've reentered the exception and are about to trap + * (in spillRegisters, which uses sp as its authority). Make sure that we + * end up in an architectural trap loop: clobber mtcc, so that that trap + * attempts to vector to an untagged PCC, thereby causing another trap, + * which immediately traps, and so on. + * + * We could instead zero mtdc, ensuring that we spin through several + * instructions (taking a trap then each of cspecialrw, bnez, cspecialw, and + * then the traping csc), but this is less architecturally visible. + * + * Failure to do either would mean that the trap in spillRegisters below + * would re-enter the trap-handler with an unknown value (the first trap's + * sp) in mtdc, which the rest of this code would take to be a valid + * TrustedStack. Exactly what would happen then is hard to say; we'd try + * spilling registers to a potentially attacker-controlled pointer, at the + * very least, and that's something to avoid. 
+ */ bnez sp, .Lexception_entry_still_alive cspecialw mtcc, csp .Lexception_entry_still_alive: - - // csp now points to the save reg frame that we can use. - // The guest csp (c2) is now in mtdc. Will be spilled later, but we - // spill all the other 14 registers now. + /* + * LIVE IN: mcause, mtdc, sp + * + * Atlas: + * mtdc: the interrupted context's sp + * sp: TrustedStack (and, in particular, a spill frame we can use) + */ + + /* + * The guest sp/csp (x2/c2) is now in mtdc. Will be spilled later, but we + * spill all the other 14 registers now. + */ spillRegisters cra, cgp, ctp, ct0, ct1, ct2, cs0, cs1, ca0, ca1, ca2, ca3, ca4, ca5 - // If a thread has exited then it will set a fake value in the mcause so - // that the scheduler knows not to try to resume it. + /* + * This label's name is somewhat confusing. Not all threads arriving here
Maybe rename it to something like `.Lexiting_threads_rejoin`?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -183,222 +190,479 @@ switcher_scheduler_entry_csp: .p2align 2 .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: - cincoffset csp, csp, -SPILL_SLOT_SIZE - csc cs0, SPILL_SLOT_cs0(csp) - csc cs1, SPILL_SLOT_cs1(csp) - csc cgp, SPILL_SLOT_cgp(csp) - csc cra, SPILL_SLOT_pcc(csp) - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address (ensured because we enter via an IRQ-disabling + * forward sentry, which requires ra as the destination register of the + * cjalr the caller used). + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed/0ed + * tp, t2: scratch + */ + /* + * The caller should back up all caller saved registers. Spill + * callee-save registers carefully. If we find ourselves unable to do + * so, we'll return an error to the caller (via the exception path; see + * .Lhandle_error_in_switcher). The error handling path assumes that + * the first spill is to the lowest address and guaranteed to trap if + * any would. The register file is safe to expose to the caller. 
+ */ + cincoffset ct2, csp, -SPILL_SLOT_SIZE +.Lswitcher_entry_first_spill: + csc cs0, SPILL_SLOT_cs0(ct2) + csc cs1, SPILL_SLOT_cs1(ct2) + csc cgp, SPILL_SLOT_cgp(ct2) + csc cra, SPILL_SLOT_pcc(ct2) + cmove csp, ct2 + /* + * Atlas: + * ra, gp, s0, s1: scratch (presently, redundant caller values) + * t2: scratch (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the + * compartment's csp is valid. If not, force unwind. Note that this + * check is purely to protect the callee, not the switcher itself. + * + * Make sure the caller's CSP has the expected permissions and that its + * top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the + * stores above. + * + * Uses tp and t2 as scratch scalars. + */ + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lforce_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lforce_unwind + // Atlas: sp: the caller's stack pointer, now validated + // mtdc should always have an offset of 0. cspecialr ct2, mtdc + // Atlas: t2: a pointer to this thread's TrustedStack structure #ifndef NDEBUG // XXX: This line is useless, only for mtdc to show up in debugging. cmove ct2, ct2 #endif - clear_hazard_slots ct2, ctp - - // make sure the trusted stack is still in bounds + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). + */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure. If the frame offset points "one past the end" (or futher out), + * we have no more frames available, so off to .Lout_of_trusted_stack . 
+ */ clhu tp, TrustedStack_offset_frameoffset(ct2) cgetlen t2, ct2 + /* + * Atlas: + * t2: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] + */ + // LIVE OUT: mtdc, sp bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was + // we are past the stacks checks. cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + // Atlas: t2: pointer to this thread's TrustedStack (again) + // The register file is (again) unsafe to expose to the caller cincoffset ctp, ct2, tp + // Atlas: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new tstack offset, any fault is + * seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. 
- // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) -#ifndef CONFIG_NO_SWITCHER_SAFETY - // Chop off the stack. + + // Chop off the stack, using s1 to hold a scratch scalar cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * t2: pointer to stack, with bounds from stack base to boundary in s0, + * cursor at stack base + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: (still) pointer to the freshly populated TrustedStackFrame + * t1: (still) sealed export table entry for the target callee + * a0, a1, a2, a3, a4, a5, t0: (still) call argument values / to be zeroed + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge t2, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. + bge gp, sp, .Lafter_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. 
+ */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp .Lafter_zero: + /* + * LIVE IN: mtdc, sp, tp, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * t2, gp: caller stack capabilities (dead) + * s0: scratch scalar (dead) + */ // Reserve space for unwind state and so on. cincoffset csp, csp, -STACK_ENTRY_RESERVED_SPACE + // Atlas: sp: pointer to stack, below compartment invocation local storage #ifdef CONFIG_MSHWM // store new stack top as stack high water mark csrw CSR_MSHWM, sp #endif -#endif // CONFIG_NO_SWITCHER_SAFETY -.Lout: - // Fetch the sealing key + + // Fetch the sealing key, using gp as a scratch scalar LoadCapPCC cs0, compartment_switcher_sealing_key - li gp, 9 + // Atlas: s0: switcher sealing key + li gp, 9 // loader/boot.cc:/SealedImportTableEntries csetaddr cs0, cs0, gp - // The target capability is in ct1. Unseal, check tag and load the entry point offset. + // The target capability is in t1. Unseal and load the entry point offset. cunseal ct1, ct1, cs0 - // Load the entry point offset. If cunseal failed then this will fault and - // we will force unwind. + /* + * Atlas: + * t1: unsealed pointer with bounds encompassing callee compartment + * ExportTable and ExportEntry array and cursor pointing at the + * callee ExportEntry + */ + /* + * Load the entry point offset. If cunseal failed then this will fault and + * we will force unwind; see .Lhandle_error_switcher_pcc_check. + */ clhu s0, ExportEntry_offset_functionStart(ct1) - // At this point, we know that the cunseal has succeeded (we didn't trap on - // the load) and so it's safe to store the unsealed value of the export - // table pointer. Nothing between this point and transition to the callee - // should fault. 
+ // Atlas: s0: callee compartment function entrypoint offset (scalar) + /* + * At this point, we know that the cunseal has succeeded (we didn't trap on + * the load) and so it's safe to store the unsealed value of the export + * table pointer. Nothing between this point and transition to the callee + * should fault. + */ csc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Load the minimum stack size required by the callee. + /* + * Load the minimum stack size required by the callee. At this point we + * drop the register file's reference to the TrustedStackFrame, bringing us + * closer to a register file that is not secret from the callee. + */ clbu tp, ExportEntry_offset_minimumStackSize(ct1) + // Atlas: tp: scratch scalar // The stack size is in 8-byte units, so multiply by 8. slli tp, tp, 3 - // Check that the stack is large enough for the callee. - // At this point, we have already truncated the stack and so the length of - // the stack is the length that the callee can use. + /* + * Check that the stack is large enough for the callee. + * At this point, we have already truncated the stack and so the length of + * the stack is the length that the callee can use. + */ cgetlen t2, csp + // Atlas: t2: scratch scalar // Include the space we reserved for the unwind state. addi t2, t2, -STACK_ENTRY_RESERVED_SPACE + // LIVE OUT: mtdc bgtu tp, t2, .Lstack_too_small // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. 
+ // Atlas: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas: ra: target function entry vector (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. + */ .Lload_zero_arguments_start: auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + // Atlas: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 .Lzero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. 
See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 bnez t1, .Lskip_interrupt_disable csrsi mstatus, 0x8 .Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: + + /* + * Atlas: + * ra: (still) target function entry vector + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. + */ zeroRegisters tp, t1, t2, s0, s1 cjalr cra .Lskip_compartment_call: - // If we are doing a forced unwind of the trusted stack then we do almost - // exactly the same as a normal unwind. We will jump here from the - // exception path. + /* + * FROM: malice, above, .Lstack_too_small + * LIVE IN: mtdc, a0, a1 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0, a1: return value(s). The callee function must ensure that it clears + * these as appropriate if it is returning 0 or 1 values and not 2. + * ra, sp, gp: dead or callee state (to be replaced by caller) + * tp, s0, s1, t0, t1, t2, a2, a3, a4, a5: dead or callee state (to be 0ed) + */ + + /* + * The return sentry given to the callee as part of that cjalr could be + * captured by the callee or passed back to the caller. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer. The contents + * of a0 and a1 will be exposed to the compartment above the one currently + * executing, or the thread will be terminated if there is no such. 
+ */ + + /* + * If we are doing a forced unwind of the trusted stack then we do almost + * exactly the same as a normal unwind. We will jump here from the + * exception path. + * + * XXX? Is that still right? + */ + + // LIVE OUT: mtdc, a0, a1 cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, csp and cgp needed for the compartment - // cs0 saved and restored on trusted stack - // cs1 saved and restored on trusted stack - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * ra, sp, gp, s0, s1: restored caller values + * a0, a1: (still) return value(s), as above + */ zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 +.Ljust_return: cret - // If the stack is too small, we don't do the call, but to avoid leaking - // any other state we still go through the same return path as normal. We - // set the return registers to -ENOTENOUGHSTACK and 0, so users can see - // that this is the failure reason. + /* + * If the stack is too small, we don't do the call, but to avoid leaking + * any other state we still go through the same return path as normal. We + * set the return registers to -ENOTENOUGHSTACK and 0, so users can see + * that this is the failure reason. + */ .Lstack_too_small: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc + * + * Atlas: + * mtdc: thread trusted stack pointer + */ li a0, -ENOTENOUGHSTACK li a1, 0 + // LIVE OUT: mtdc, a0, a1 j .Lskip_compartment_call + + /* + * If we have run out of trusted stack, then just restore the caller's state + * and return an error value. 
+ */ +.Lout_of_trusted_stack: + /* + * FROM: __Z26compartment_switcher_entryz + * LIVE IN: mtdc, sp + * + * Atlas: + * mtdc: TrustedStack pointer + * sp: Caller stack pointer, pointing at switcher spill frame + */ + // Restore the spilled values + clc cs0, SPILL_SLOT_cs0(csp) + clc cs1, SPILL_SLOT_cs1(csp) + clc cra, SPILL_SLOT_pcc(csp) + clc cgp, SPILL_SLOT_cgp(csp) + cincoffset csp, csp, SPILL_SLOT_SIZE + // Set the return registers + li a0, -ENOTENOUGHTRUSTEDSTACK + li a1, 0 + // Zero everything else + zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 + cret + .size compartment_switcher_entry, . - compartment_switcher_entry - // the entry point of all exceptions and interrupts - // For now, the entire routine is run with interrupts disabled. .global exception_entry_asm .p2align 2 +/** + * The entry point of all exceptions and interrupts + * + * For now, the entire routine is run with interrupts disabled. + */ exception_entry_asm: - // We do not trust the interruptee's context. We cannot use its stack in any way. - // The save reg frame we can use is fetched from the tStack. - // In general, mtdc holds the trusted stack register. We are here with - // interrupts off and precious few registers available to us, so swap it - // with the csp (we'll put it back, later). + /* + * FROM: malice, error + * IRQ: deferred + * LIVE IN: mcause, mtdc, *
mtval?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -414,25 +678,39 @@ exception_entry_asm: csrr t1, mcause csw t1, TrustedStack_offset_mcause(csp) - // If we hit one of the exception conditions that we should let - // compartments handle then deliver it to the compartment. - // CHERI exception code. - li a0, 0x1c + /* + * If we hit one of the exception conditions that we should let + * compartments handle then deliver it to the compartment.
The name of the label is more accurate than the comment, because we only *might* let the compartment handle it.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -414,25 +678,39 @@ exception_entry_asm: csrr t1, mcause csw t1, TrustedStack_offset_mcause(csp) - // If we hit one of the exception conditions that we should let - // compartments handle then deliver it to the compartment. - // CHERI exception code. - li a0, 0x1c + /* + * If we hit one of the exception conditions that we should let + * compartments handle then deliver it to the compartment. + */ +.Lexception_might_handle:
Doc-only label? Can we annotate these in some way so I don't look for jumps to it?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -444,57 +722,106 @@ exception_entry_asm: LoadCapPCC csp, switcher_scheduler_entry_csp LoadCapPCC cgp, switcher_scheduler_entry_cgp LoadCapPCC cra, switcher_scheduler_entry_pcc + /* + * Atlas: + * ra, gp: scheduler compartment context + * sp: scheduler thread context + * a0: sealed trusted stack pointer (opaque thread handle) + * a1: copy of mcause + * a2: copy of mepc + * a3: copy of mtval + */ // Zero everything apart from things explicitly passed to scheduler. - // cra, csp and cgp needed for the scheduler compartment - // ca0, used for the sealed trusted stack argument - // ca1, used for mcause - // ca2, used for mepc - // ca3, used for mtval zeroAllRegistersExcept ra, sp, gp, a0, a1, a2, a3 // Call the scheduler. This returns the new thread in ca0. cjalr cra - // The scheduler may change interrupt posture or may trap, but if it - // returns to us (that is, we reach here), the use of the sentry created by - // cjalr will have restored us to deferring interrupts, and we will remain - // in that posture until the mret in install_context. - // Switch onto the new thread's trusted stack - LoadCapPCC ct0, compartment_switcher_sealing_key - li gp, 10 - csetaddr ct0, ct0, gp - cunseal csp, ca0, ct0 - clw t0, TrustedStack_offset_mcause(csp) +.Lscheduler_return: + /* + * FROM: above + * IRQ: deferred + * LIVE IN: a0 + * + * Atlas: + * a0: sealed trusted stack pointer to bring onto core + */ + /* + * The interrupts-disabling return sentry handed to the scheduler as part of + * that cjalr may be captured on its stack, but as the scheduler is the + * topmost and only compartment in its thread (as it cannot make + * cross-compartment calls), there is very little that can go wrong as as a
Note that the scheduler can reach the compartment-call entry point, we should document that the zero-mtdc defends against it trying to do cross-compartment calls from here.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -444,57 +722,106 @@ exception_entry_asm: LoadCapPCC csp, switcher_scheduler_entry_csp LoadCapPCC cgp, switcher_scheduler_entry_cgp LoadCapPCC cra, switcher_scheduler_entry_pcc + /* + * Atlas: + * ra, gp: scheduler compartment context + * sp: scheduler thread context + * a0: sealed trusted stack pointer (opaque thread handle) + * a1: copy of mcause + * a2: copy of mepc + * a3: copy of mtval + */ // Zero everything apart from things explicitly passed to scheduler. - // cra, csp and cgp needed for the scheduler compartment - // ca0, used for the sealed trusted stack argument - // ca1, used for mcause - // ca2, used for mepc - // ca3, used for mtval zeroAllRegistersExcept ra, sp, gp, a0, a1, a2, a3 // Call the scheduler. This returns the new thread in ca0. cjalr cra - // The scheduler may change interrupt posture or may trap, but if it - // returns to us (that is, we reach here), the use of the sentry created by - // cjalr will have restored us to deferring interrupts, and we will remain - // in that posture until the mret in install_context. - // Switch onto the new thread's trusted stack - LoadCapPCC ct0, compartment_switcher_sealing_key - li gp, 10 - csetaddr ct0, ct0, gp - cunseal csp, ca0, ct0 - clw t0, TrustedStack_offset_mcause(csp) +.Lscheduler_return: + /* + * FROM: above + * IRQ: deferred + * LIVE IN: a0 + * + * Atlas: + * a0: sealed trusted stack pointer to bring onto core + */ + /* + * The interrupts-disabling return sentry handed to the scheduler as part of + * that cjalr may be captured on its stack, but as the scheduler is the + * topmost and only compartment in its thread (as it cannot make + * cross-compartment calls), there is very little that can go wrong as as a + * result of that capture. 
+ */ + /* + * The scheduler may change interrupt posture or may trap (and infinite loop + * if it does so; see the top of exception_entry_asm and recall that mtdc is + * 0 at this point), but if it returns to us (that is, we reach here), the + * use of the sentry created by cjalr will have restored us to deferring + * interrupts, and we will remain in that posture until the mret in + * install_context. + */ + + // Switch onto the new thread's trusted stack, using gp as a scratch scalar + LoadCapPCC csp, compartment_switcher_sealing_key + li gp, 10 // loader/boot.cc:/SealedTrustedStacks + csetaddr csp, csp, gp + cunseal csp, ca0, csp + // Atlas: sp: unsealed target thread trusted stack pointer - // Only now that we have done something that actually requires the tag of - // csp be set, put it into mtdc. If the scheduler has returned something - // untagged or something with the wrong otype, the cunseal will have left - // csp untagged and clw will trap with mtdc still 0. If we made it here, - // though, csp is tagged and so was tagged and correctly typed, and so it - // is safe to install it to mtdc. We won't cause traps between here and - // mret, so reentrancy is no longer a concern. + clw t0, TrustedStack_offset_mcause(csp) + // Atlas: t0: stored mcause for the target thread + + /* + * Only now that we have done something that actually requires the tag of + * csp be set, put it into mtdc. If the scheduler has returned something + * untagged or something with the wrong otype, the cunseal will have left + * csp untagged and clw will trap with mtdc still 0. If we made it here, + * though, csp is tagged and so was tagged and correctly typed, and so it + * is safe to install it to mtdc. We won't cause traps between here and + * mret, so reentrancy is no longer a concern. + */ cspecialw mtdc, csp - - // If mcause is 25, then we will jump into the error handler: another - // thread has signalled that this thread should be interrupted. 
25 is a - // reserved exception number that we repurpose to indicate explicit - // interruption. - li t1, 25 + // Atlas: mtdc: TrustedStack pointer + + /* + * If mcause is MCAUSE_THREAD_INTERRUPT, then we will jump into the error + * handler: another thread has signalled that this thread should be + * interrupted. MCAUSE_THREAD_INTERRUPT is a reserved exception number that + * we repurpose to indicate explicit interruption. + */ + li t1, MCAUSE_THREAD_INTERRUPT + // LIVE OUT: mtdc, sp beq t0, t1, .Lhandle_injected_error - // Environment call from M-mode is exception code 11. - // We need to skip the ecall instruction to avoid an infinite loop. + /* + * Environment call from M-mode is exception code 11. + * We need to skip the ecall instruction to avoid an infinite loop.
We could also remove mcall (or treat it as a local error that goes to the error handler) and expose a yield entry point, if that would simplify the code.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -504,207 +831,348 @@ exception_entry_asm: csrw CSR_MSHWMB, ra #endif cspecialw mepcc, ct2 - // csp (c2) will be loaded last and will overwrite the trusted stack pointer - // with the thread's stack pointer. + + /* + * reloadRegisters restores registers in the order given, and we ensure that + * sp/csp (x2/c2) will be loaded last and will overwrite the trusted stack + * pointer with the thread's stack pointer. + */ reloadRegisters cra, cgp, ctp, ct0, ct1, ct2, cs0, cs1, ca0, ca1, ca2, ca3, ca4, ca5, csp mret -// We are starting a forced unwind. This is reached either when we are unable -// to run an error handler, or when we do run an error handler and it instructs -// us to return. This treats all register values as undefined on entry. +/** + * We are starting a forced unwind. This is reached either when we are unable + * to run an error handler, or when we do run an error handler and it instructs + * us to return. This treats all register values as undefined on entry. + */ .Lforce_unwind: + /* + * FROM: .Lhandle_error_switcher_pcc, .Ltry_stackless_handler, + * .Lhandle_error_test_double_fault, .Lhandle_error_test_too_many + * IRQ: deferred + * LIVE IN: mtdc + */ + // Pop the trusted stack frame. + // LIVE OUT: mtdc cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, cs0, cs1, and cgp were restored from the compartment's stack - // csp restored from the trusted stack. - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * mtdc: (still) pointer to TrustedStack + * sp: target compartment stack (restored from TrustedStack frame) + * ra, gp: target compartment context (from switcher spill frame) + * s0, s1: target callee-save registers (from switcher spill frame) + * a0, a1: return values + */ + // Zero all registers apart from ra, sp, gp, s0, s1, and return args. 
zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 li a0, -ECOMPARTMENTFAIL li a1, 0 cret - -// If we have run out of trusted stack, then just restore the caller's state -// and return an error value. -.Lout_of_trusted_stack: - // Restore the spilled values - clc cs0, SPILL_SLOT_cs0(csp) - clc cs1, SPILL_SLOT_cs1(csp) - clc cra, SPILL_SLOT_pcc(csp) - clc cgp, SPILL_SLOT_cgp(csp) - cincoffset csp, csp, SPILL_SLOT_SIZE - // Set the return registers - li a0, -ENOTENOUGHTRUSTEDSTACK - li a1, 0 - // Zero everything else - zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 - cret - -// If we have a possibly recoverable error, see if we have a useful error -// handler. At this point, the register state will have been saved in the -// register-save area and so we just need to set up the environment. -// -// On entry to this block, csp contains the trusted stack pointer, all other -// registers are undefined. -// -// The handler will have this type signature: -// enum ErrorRecoveryBehaviour compartment_error_handler(struct ErrorState *frame, -// size_t mcause, -// size_t mtval); +/** + * If we have a possibly recoverable error, see if we have a useful error + * handler. At this point, the register state will have been saved in the + * register-save area and so we just need to set up the environment. + * The handler will have this type signature: + * + * enum ErrorRecoveryBehaviour + * compartment_error_handler(struct ErrorState *frame, + * size_t mcause, + * size_t mtval); + */ .Lhandle_error: - // We're now out of the exception path, so make sure that mtdc contains - // the trusted stack pointer. + /* + * FROM: .Lexception_might_handle + * IRQ: deferred + * LIVE IN: sp + * + * Atlas: + * sp: pointer to TrustedStack + */ + /* + * We're now out of the exception path, so make sure that mtdc contains + * the trusted stack pointer. + */ cspecialw mtdc, csp - // Store an error value in return registers, which will be passed to the - // caller on unwind. 
They are currently undefined, if we leave this path - // for a forced unwind then we will return whatever is in ca0 and ca1 to - // the caller so must ensure that we don't leak anything. - li a0, -1 - li a1, 0 - - // We want to make sure we can't leak any switcher state into error - // handlers, so if we're faulting in the switcher then we should force - // unwind. We never change the base of PCC in the switcher, so we can - // check for this case by ensuring that the spilled mepcc and our current - // pcc have the same base. + /* + * Atlas: + * mtdc: pointer to TrustedStack + * sp: (still) pointer to TrustedStack + */ + +.Lhandle_error_switcher_pcc: + // FROM: above + /* + * We want to make sure we can't leak any switcher state into error + * handlers, so if we're faulting in the switcher then we should force + * unwind. We never change the base of PCC in the switcher, so we can + * check for this case by ensuring that the spilled mepcc and our current + * pcc have the same base. + */ auipcc ct0, 0 clc ct1, TrustedStack_offset_mepcc(csp) cgetbase t0, ct0 - cgetbase t1, ct1 - beq t0, t1, .Lforce_unwind + cgetbase tp, ct1 + bne t0, tp, .Lhandle_error_not_switcher + // Atlas: t1: a copy of mepcc + + /* + * Some switcher instructions' traps are handled specially, by looking at + * the offset of mepcc. Otherwise, we're off to a force unwind. + */ +.Lhandle_error_in_switcher: + auipcc ctp, %cheriot_compartment_hi(.Lswitcher_entry_first_spill) + cincoffset ctp, ctp, %cheriot_compartment_lo_i(.Lhandle_error_in_switcher) + // LIVE OUT: mtdc + bne t1, tp, .Lforce_unwind + li a0, -ENOTENOUGHSTACK + li a1, 0
We never fall through from here and `.Linstall_return_context` has no predecessors, so I think this can be simplified by: - Move `.Lhandle_error_in_switcher` above `.Linstall_return_context` - Remove the `j .Lhandle_error_in_switcher`. - Invert the condition on 918 and have the normal path be the fallthrough one.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -504,207 +831,348 @@ exception_entry_asm: csrw CSR_MSHWMB, ra #endif cspecialw mepcc, ct2 - // csp (c2) will be loaded last and will overwrite the trusted stack pointer - // with the thread's stack pointer. + + /* + * reloadRegisters restores registers in the order given, and we ensure that + * sp/csp (x2/c2) will be loaded last and will overwrite the trusted stack + * pointer with the thread's stack pointer. + */ reloadRegisters cra, cgp, ctp, ct0, ct1, ct2, cs0, cs1, ca0, ca1, ca2, ca3, ca4, ca5, csp mret -// We are starting a forced unwind. This is reached either when we are unable -// to run an error handler, or when we do run an error handler and it instructs -// us to return. This treats all register values as undefined on entry. +/** + * We are starting a forced unwind. This is reached either when we are unable + * to run an error handler, or when we do run an error handler and it instructs + * us to return. This treats all register values as undefined on entry. + */ .Lforce_unwind: + /* + * FROM: .Lhandle_error_switcher_pcc, .Ltry_stackless_handler, + * .Lhandle_error_test_double_fault, .Lhandle_error_test_too_many + * IRQ: deferred + * LIVE IN: mtdc + */ + // Pop the trusted stack frame. + // LIVE OUT: mtdc cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, cs0, cs1, and cgp were restored from the compartment's stack - // csp restored from the trusted stack. - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * mtdc: (still) pointer to TrustedStack + * sp: target compartment stack (restored from TrustedStack frame) + * ra, gp: target compartment context (from switcher spill frame) + * s0, s1: target callee-save registers (from switcher spill frame) + * a0, a1: return values + */ + // Zero all registers apart from ra, sp, gp, s0, s1, and return args. 
zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 li a0, -ECOMPARTMENTFAIL li a1, 0 cret - -// If we have run out of trusted stack, then just restore the caller's state -// and return an error value. -.Lout_of_trusted_stack: - // Restore the spilled values - clc cs0, SPILL_SLOT_cs0(csp) - clc cs1, SPILL_SLOT_cs1(csp) - clc cra, SPILL_SLOT_pcc(csp) - clc cgp, SPILL_SLOT_cgp(csp) - cincoffset csp, csp, SPILL_SLOT_SIZE - // Set the return registers - li a0, -ENOTENOUGHTRUSTEDSTACK - li a1, 0 - // Zero everything else - zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 - cret - -// If we have a possibly recoverable error, see if we have a useful error -// handler. At this point, the register state will have been saved in the -// register-save area and so we just need to set up the environment. -// -// On entry to this block, csp contains the trusted stack pointer, all other -// registers are undefined. -// -// The handler will have this type signature: -// enum ErrorRecoveryBehaviour compartment_error_handler(struct ErrorState *frame, -// size_t mcause, -// size_t mtval); +/** + * If we have a possibly recoverable error, see if we have a useful error + * handler. At this point, the register state will have been saved in the + * register-save area and so we just need to set up the environment. + * The handler will have this type signature: + * + * enum ErrorRecoveryBehaviour + * compartment_error_handler(struct ErrorState *frame, + * size_t mcause, + * size_t mtval); + */ .Lhandle_error: - // We're now out of the exception path, so make sure that mtdc contains - // the trusted stack pointer. + /* + * FROM: .Lexception_might_handle + * IRQ: deferred + * LIVE IN: sp + * + * Atlas: + * sp: pointer to TrustedStack + */ + /* + * We're now out of the exception path, so make sure that mtdc contains + * the trusted stack pointer. + */ cspecialw mtdc, csp - // Store an error value in return registers, which will be passed to the - // caller on unwind. 
They are currently undefined, if we leave this path - // for a forced unwind then we will return whatever is in ca0 and ca1 to - // the caller so must ensure that we don't leak anything. - li a0, -1 - li a1, 0 - - // We want to make sure we can't leak any switcher state into error - // handlers, so if we're faulting in the switcher then we should force - // unwind. We never change the base of PCC in the switcher, so we can - // check for this case by ensuring that the spilled mepcc and our current - // pcc have the same base. + /* + * Atlas: + * mtdc: pointer to TrustedStack + * sp: (still) pointer to TrustedStack + */ + +.Lhandle_error_switcher_pcc: + // FROM: above + /* + * We want to make sure we can't leak any switcher state into error + * handlers, so if we're faulting in the switcher then we should force + * unwind. We never change the base of PCC in the switcher, so we can + * check for this case by ensuring that the spilled mepcc and our current + * pcc have the same base. + */ auipcc ct0, 0 clc ct1, TrustedStack_offset_mepcc(csp) cgetbase t0, ct0 - cgetbase t1, ct1 - beq t0, t1, .Lforce_unwind + cgetbase tp, ct1 + bne t0, tp, .Lhandle_error_not_switcher + // Atlas: t1: a copy of mepcc + + /* + * Some switcher instructions' traps are handled specially, by looking at + * the offset of mepcc. Otherwise, we're off to a force unwind. + */ +.Lhandle_error_in_switcher: + auipcc ctp, %cheriot_compartment_hi(.Lswitcher_entry_first_spill) + cincoffset ctp, ctp, %cheriot_compartment_lo_i(.Lhandle_error_in_switcher) + // LIVE OUT: mtdc + bne t1, tp, .Lforce_unwind + li a0, -ENOTENOUGHSTACK + li a1, 0 + // LIVE OUT: sp, a0, a1 + j .Linstall_return_context +.Lhandle_error_not_switcher:
This lacks an atlas / live ins, but the label goes away if you make the change above.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -504,207 +831,348 @@ exception_entry_asm: csrw CSR_MSHWMB, ra #endif cspecialw mepcc, ct2 - // csp (c2) will be loaded last and will overwrite the trusted stack pointer - // with the thread's stack pointer. + + /* + * reloadRegisters restores registers in the order given, and we ensure that + * sp/csp (x2/c2) will be loaded last and will overwrite the trusted stack + * pointer with the thread's stack pointer. + */ reloadRegisters cra, cgp, ctp, ct0, ct1, ct2, cs0, cs1, ca0, ca1, ca2, ca3, ca4, ca5, csp mret -// We are starting a forced unwind. This is reached either when we are unable -// to run an error handler, or when we do run an error handler and it instructs -// us to return. This treats all register values as undefined on entry. +/** + * We are starting a forced unwind. This is reached either when we are unable + * to run an error handler, or when we do run an error handler and it instructs + * us to return. This treats all register values as undefined on entry. + */ .Lforce_unwind: + /* + * FROM: .Lhandle_error_switcher_pcc, .Ltry_stackless_handler, + * .Lhandle_error_test_double_fault, .Lhandle_error_test_too_many + * IRQ: deferred + * LIVE IN: mtdc + */ + // Pop the trusted stack frame. + // LIVE OUT: mtdc cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, cs0, cs1, and cgp were restored from the compartment's stack - // csp restored from the trusted stack. - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * mtdc: (still) pointer to TrustedStack + * sp: target compartment stack (restored from TrustedStack frame) + * ra, gp: target compartment context (from switcher spill frame) + * s0, s1: target callee-save registers (from switcher spill frame) + * a0, a1: return values + */ + // Zero all registers apart from ra, sp, gp, s0, s1, and return args. 
zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 li a0, -ECOMPARTMENTFAIL li a1, 0 cret - -// If we have run out of trusted stack, then just restore the caller's state -// and return an error value. -.Lout_of_trusted_stack: - // Restore the spilled values - clc cs0, SPILL_SLOT_cs0(csp) - clc cs1, SPILL_SLOT_cs1(csp) - clc cra, SPILL_SLOT_pcc(csp) - clc cgp, SPILL_SLOT_cgp(csp) - cincoffset csp, csp, SPILL_SLOT_SIZE - // Set the return registers - li a0, -ENOTENOUGHTRUSTEDSTACK - li a1, 0 - // Zero everything else - zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 - cret - -// If we have a possibly recoverable error, see if we have a useful error -// handler. At this point, the register state will have been saved in the -// register-save area and so we just need to set up the environment. -// -// On entry to this block, csp contains the trusted stack pointer, all other -// registers are undefined. -// -// The handler will have this type signature: -// enum ErrorRecoveryBehaviour compartment_error_handler(struct ErrorState *frame, -// size_t mcause, -// size_t mtval); +/** + * If we have a possibly recoverable error, see if we have a useful error + * handler. At this point, the register state will have been saved in the + * register-save area and so we just need to set up the environment. + * The handler will have this type signature: + * + * enum ErrorRecoveryBehaviour + * compartment_error_handler(struct ErrorState *frame, + * size_t mcause, + * size_t mtval); + */ .Lhandle_error: - // We're now out of the exception path, so make sure that mtdc contains - // the trusted stack pointer. + /* + * FROM: .Lexception_might_handle + * IRQ: deferred + * LIVE IN: sp + * + * Atlas: + * sp: pointer to TrustedStack + */ + /* + * We're now out of the exception path, so make sure that mtdc contains + * the trusted stack pointer. + */ cspecialw mtdc, csp - // Store an error value in return registers, which will be passed to the - // caller on unwind. 
They are currently undefined, if we leave this path - // for a forced unwind then we will return whatever is in ca0 and ca1 to - // the caller so must ensure that we don't leak anything. - li a0, -1 - li a1, 0 - - // We want to make sure we can't leak any switcher state into error - // handlers, so if we're faulting in the switcher then we should force - // unwind. We never change the base of PCC in the switcher, so we can - // check for this case by ensuring that the spilled mepcc and our current - // pcc have the same base. + /* + * Atlas: + * mtdc: pointer to TrustedStack + * sp: (still) pointer to TrustedStack + */ + +.Lhandle_error_switcher_pcc: + // FROM: above + /* + * We want to make sure we can't leak any switcher state into error + * handlers, so if we're faulting in the switcher then we should force + * unwind. We never change the base of PCC in the switcher, so we can + * check for this case by ensuring that the spilled mepcc and our current + * pcc have the same base. + */ auipcc ct0, 0 clc ct1, TrustedStack_offset_mepcc(csp) cgetbase t0, ct0 - cgetbase t1, ct1 - beq t0, t1, .Lforce_unwind + cgetbase tp, ct1 + bne t0, tp, .Lhandle_error_not_switcher + // Atlas: t1: a copy of mepcc + + /* + * Some switcher instructions' traps are handled specially, by looking at + * the offset of mepcc. Otherwise, we're off to a force unwind. 
+ */ +.Lhandle_error_in_switcher: + auipcc ctp, %cheriot_compartment_hi(.Lswitcher_entry_first_spill) + cincoffset ctp, ctp, %cheriot_compartment_lo_i(.Lhandle_error_in_switcher) + // LIVE OUT: mtdc + bne t1, tp, .Lforce_unwind + li a0, -ENOTENOUGHSTACK + li a1, 0 + // LIVE OUT: sp, a0, a1 + j .Linstall_return_context +.Lhandle_error_not_switcher: // Load the interrupted thread's stack pointer into ct0 clc ct0, TrustedStack_offset_csp(csp) - // See if we can find a handler: + // Atlas: t0: interrupted thread's stack pointer + + /* + * If we have already unwound so far that the TrustedStack::frameoffset is + * pointing at TrustedStack::frames[0] -- that is, if the stack has no + * active frames on it -- then just go back to the context we came from, + * effectively parking this thread in a (slow) infinite loop.
Don't we want to hit the thread exit path in that case?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -504,207 +831,348 @@ exception_entry_asm: csrw CSR_MSHWMB, ra #endif cspecialw mepcc, ct2 - // csp (c2) will be loaded last and will overwrite the trusted stack pointer - // with the thread's stack pointer. + + /* + * reloadRegisters restores registers in the order given, and we ensure that + * sp/csp (x2/c2) will be loaded last and will overwrite the trusted stack + * pointer with the thread's stack pointer. + */ reloadRegisters cra, cgp, ctp, ct0, ct1, ct2, cs0, cs1, ca0, ca1, ca2, ca3, ca4, ca5, csp mret -// We are starting a forced unwind. This is reached either when we are unable -// to run an error handler, or when we do run an error handler and it instructs -// us to return. This treats all register values as undefined on entry. +/** + * We are starting a forced unwind. This is reached either when we are unable + * to run an error handler, or when we do run an error handler and it instructs + * us to return. This treats all register values as undefined on entry. + */ .Lforce_unwind: + /* + * FROM: .Lhandle_error_switcher_pcc, .Ltry_stackless_handler, + * .Lhandle_error_test_double_fault, .Lhandle_error_test_too_many + * IRQ: deferred + * LIVE IN: mtdc + */ + // Pop the trusted stack frame. + // LIVE OUT: mtdc cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, cs0, cs1, and cgp were restored from the compartment's stack - // csp restored from the trusted stack. - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * mtdc: (still) pointer to TrustedStack + * sp: target compartment stack (restored from TrustedStack frame) + * ra, gp: target compartment context (from switcher spill frame) + * s0, s1: target callee-save registers (from switcher spill frame) + * a0, a1: return values + */ + // Zero all registers apart from ra, sp, gp, s0, s1, and return args. 
zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 li a0, -ECOMPARTMENTFAIL li a1, 0 cret - -// If we have run out of trusted stack, then just restore the caller's state -// and return an error value. -.Lout_of_trusted_stack: - // Restore the spilled values - clc cs0, SPILL_SLOT_cs0(csp) - clc cs1, SPILL_SLOT_cs1(csp) - clc cra, SPILL_SLOT_pcc(csp) - clc cgp, SPILL_SLOT_cgp(csp) - cincoffset csp, csp, SPILL_SLOT_SIZE - // Set the return registers - li a0, -ENOTENOUGHTRUSTEDSTACK - li a1, 0 - // Zero everything else - zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 - cret - -// If we have a possibly recoverable error, see if we have a useful error -// handler. At this point, the register state will have been saved in the -// register-save area and so we just need to set up the environment. -// -// On entry to this block, csp contains the trusted stack pointer, all other -// registers are undefined. -// -// The handler will have this type signature: -// enum ErrorRecoveryBehaviour compartment_error_handler(struct ErrorState *frame, -// size_t mcause, -// size_t mtval); +/** + * If we have a possibly recoverable error, see if we have a useful error + * handler. At this point, the register state will have been saved in the + * register-save area and so we just need to set up the environment. + * The handler will have this type signature: + * + * enum ErrorRecoveryBehaviour + * compartment_error_handler(struct ErrorState *frame, + * size_t mcause, + * size_t mtval); + */ .Lhandle_error: - // We're now out of the exception path, so make sure that mtdc contains - // the trusted stack pointer. + /* + * FROM: .Lexception_might_handle + * IRQ: deferred + * LIVE IN: sp + * + * Atlas: + * sp: pointer to TrustedStack + */ + /* + * We're now out of the exception path, so make sure that mtdc contains + * the trusted stack pointer. + */ cspecialw mtdc, csp - // Store an error value in return registers, which will be passed to the - // caller on unwind. 
They are currently undefined, if we leave this path - // for a forced unwind then we will return whatever is in ca0 and ca1 to - // the caller so must ensure that we don't leak anything. - li a0, -1 - li a1, 0 - - // We want to make sure we can't leak any switcher state into error - // handlers, so if we're faulting in the switcher then we should force - // unwind. We never change the base of PCC in the switcher, so we can - // check for this case by ensuring that the spilled mepcc and our current - // pcc have the same base. + /* + * Atlas: + * mtdc: pointer to TrustedStack + * sp: (still) pointer to TrustedStack + */ + +.Lhandle_error_switcher_pcc: + // FROM: above + /* + * We want to make sure we can't leak any switcher state into error + * handlers, so if we're faulting in the switcher then we should force + * unwind. We never change the base of PCC in the switcher, so we can + * check for this case by ensuring that the spilled mepcc and our current + * pcc have the same base. + */ auipcc ct0, 0 clc ct1, TrustedStack_offset_mepcc(csp) cgetbase t0, ct0 - cgetbase t1, ct1 - beq t0, t1, .Lforce_unwind + cgetbase tp, ct1 + bne t0, tp, .Lhandle_error_not_switcher + // Atlas: t1: a copy of mepcc + + /* + * Some switcher instructions' traps are handled specially, by looking at + * the offset of mepcc. Otherwise, we're off to a force unwind. 
+ */ +.Lhandle_error_in_switcher: + auipcc ctp, %cheriot_compartment_hi(.Lswitcher_entry_first_spill) + cincoffset ctp, ctp, %cheriot_compartment_lo_i(.Lhandle_error_in_switcher) + // LIVE OUT: mtdc + bne t1, tp, .Lforce_unwind + li a0, -ENOTENOUGHSTACK + li a1, 0 + // LIVE OUT: sp, a0, a1 + j .Linstall_return_context +.Lhandle_error_not_switcher: // Load the interrupted thread's stack pointer into ct0 clc ct0, TrustedStack_offset_csp(csp) - // See if we can find a handler: + // Atlas: t0: interrupted thread's stack pointer + + /* + * If we have already unwound so far that the TrustedStack::frameoffset is + * pointing at TrustedStack::frames[0] -- that is, if the stack has no + * active frames on it -- then just go back to the context we came from, + * effectively parking this thread in a (slow) infinite loop. + */ clhu tp, TrustedStack_offset_frameoffset(csp) li t1, TrustedStack_offset_frames + // LIVE OUT: sp beq tp, t1, .Lreset_mepcc_and_install_context addi tp, tp, -TrustedStackFrame_size - - // ctp points to the current available trusted stack frame. cincoffset ctp, csp, tp + // Atlas: tp: pointer to current TrustedStackFrame + // a0 indicates whether we're calling a stackless error handler (0: stack, // 1: stackless) li a0, 0 // Allocate space for the register save frame on the stack. cincoffset ct0, ct0, -(16*8) - // WARNING: ENCODING SPECIFIC. - // The following depends on the fact that before-the-start values are not - // representable in the CHERIoT encoding and so will clear the tag. If - // this property changes then this will need to be replaced by a check that - // against the base of the stack. Note that this check can't be a simple - // cgetbase on ct0, because moving the address below the base sufficiently - // far that it's out of *representable* bounds will move the reported base - // value (base is a displacement from the address). 
+.Lhandle_error_stack_oob: + /* + * FROM: above + * LIVE IN: sp, tp, t0, a0 + */ + /* + * WARNING: ENCODING SPECIFIC. + * + * The following depends on the fact that before-the-start values are not + * representable in the CHERIoT encoding and so will clear the tag. If + * this property changes then this will need to be replaced by a check that + * against the base of the stack. Note that this check can't be a simple + * cgetbase on ct0, because moving the address below the base sufficiently + * far that it's out of *representable* bounds will move the reported base + * value (base is a displacement from the address). + */ cgettag t1, ct0 - // If there isn't enough space on the stack, see if there's a stackless - // handler. + /* + * If there isn't enough space on the stack, see if there's a stackless + * handler. + */ + // LIVE OUT: sp, tp, t0 beqz t1, .Ltry_stackless_handler clc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Set the export table pointer to point to the *start* of the export - // table. It will currently point to the entry point that was raised. - // TODO: We might want to pass this to the error handler, it might be - // useful for providing per-entry-point error results. + // Atlas: t1: pointer to callee's invoked export table entry + /* + * Set the export table pointer to point to the *start* of the export + * table. It will currently point to the entry point that was raised. + * + * TODO: We might want to pass this to the error handler, it might be + * useful for providing per-entry-point error results. + */ cgetbase s0, ct1 csetaddr ct1, ct1, s0 clhu s0, ExportTable_offset_errorHandler(ct1) - // A value of 0xffff indicates no error handler - // If we found one, use it, otherwise fall through and try to find a - // stackless handler. + /* + * A value of 0xffff indicates no error handler. If we found one, use it, + * otherwise fall through and try to find a stackless handler. 
+ */ li s1, 0xffff +.Lhandle_error_try_stackful: + // FROM: above + // LIVE OUT: sp, tp, t0, t1, s0, a0 bne s0, s1, .Lhandler_found .Ltry_stackless_handler: + /* + * FROM: above, .Lhandle_error_stack_oob + * LIVE IN: sp, tp, t0 + * Atlas: + * sp: pointer to TrustedStack + * tp: pointer to current TrustedStackFrame + * t0: interrupted thread's stack pointer + */ + clc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Set the export table pointer to point to the *start* of the export - // table. It will currently point to the entry point that was raised. + /* + * Set the export table pointer to point to the *start* of the export + * table. It will currently point to the entry point that was raised. + */ cgetbase s0, ct1 csetaddr ct1, ct1, s0 + // Atlas: t1: pointer to callee's export table clhu s0, ExportTable_offset_errorHandlerStackless(ct1) - // A value of 0xffff indicates no error handler - // Give up if there is no error handler for this compartment. + /* + * A value of 0xffff indicates no error handler. Give up if there is no + * error handler for this compartment, having already tried any stackful + * handler. + */ li s1, 0xffff + // LIVE OUT: mtdc beq s0, s1, .Lforce_unwind - // The stack may have had its tag cleared at this point, so for stackless - // handlers we need to restore the on-entry stack. - // Get the previous trusted stack frame - - // Load the caller's csp - clc ca0, TrustedStackFrame_offset_csp(ctp) - - // If this is the top stack frame, then the csp field is the value on - // entry. If it's any other frame then we need to go to the previous one + /* + * The stack may have had its tag cleared at this point, so for stackless + * handlers we need to restore the on-entry stack. + */ + clc ct0, TrustedStackFrame_offset_csp(ctp) + // Atlas: t0: target invocation's stack pointer, as of invocation start + + /* + * If this is the top (initial) stack frame, then the csp field is the value + * on entry and it is safe to use directly. 
Otherwise, we reconstruct the + * stack as it would have been on compartment invocation. + */ cincoffset cs1, csp, TrustedStack_offset_frames - beq s1, t1, .Lrecovered_stack - - // The address of the stack pointer will point to the bottom of the - // caller's save area, so we set the bounds to be the base up to the - // current address. - cgetaddr a1, ca0 - cgetbase a2, ca0 + beq s1, tp, .Lrecovered_stack + +.Lhandler_stack_bounding: + // FROM: above + /* + * The address of the stack pointer will point to the bottom of the caller's + * save area created by .Lswitcher_entry_first_spill and following + * instructions, so we set the bounds to be the base up to the current + * address, giving the handler access to the entirety of this invocation's + * activation frame (except the caller save registers we spilled). + */ + cgetaddr a1, ct0 + cgetbase a2, ct0 sub a1, a1, a2 - csetaddr ca0, ca0, a2 - // The code that installs the context expects csp to be in ct0 - csetboundsexact ct0, ca0, a1 + csetaddr ct0, ct0, a2 + // The code that installs the context expects the target stack to be in ct0 + csetboundsexact ct0, ct0, a1 .Lrecovered_stack:
Missing atlas, live-ins, predecessors.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -504,207 +831,348 @@ exception_entry_asm: csrw CSR_MSHWMB, ra #endif cspecialw mepcc, ct2 - // csp (c2) will be loaded last and will overwrite the trusted stack pointer - // with the thread's stack pointer. + + /* + * reloadRegisters restores registers in the order given, and we ensure that + * sp/csp (x2/c2) will be loaded last and will overwrite the trusted stack + * pointer with the thread's stack pointer. + */ reloadRegisters cra, cgp, ctp, ct0, ct1, ct2, cs0, cs1, ca0, ca1, ca2, ca3, ca4, ca5, csp mret -// We are starting a forced unwind. This is reached either when we are unable -// to run an error handler, or when we do run an error handler and it instructs -// us to return. This treats all register values as undefined on entry. +/** + * We are starting a forced unwind. This is reached either when we are unable + * to run an error handler, or when we do run an error handler and it instructs + * us to return. This treats all register values as undefined on entry. + */ .Lforce_unwind: + /* + * FROM: .Lhandle_error_switcher_pcc, .Ltry_stackless_handler, + * .Lhandle_error_test_double_fault, .Lhandle_error_test_too_many + * IRQ: deferred + * LIVE IN: mtdc + */ + // Pop the trusted stack frame. + // LIVE OUT: mtdc cjal .Lpop_trusted_stack_frame cmove cra, ca2 - // Zero all registers apart from RA, GP, SP and return args. - // cra, cs0, cs1, and cgp were restored from the compartment's stack - // csp restored from the trusted stack. - // ca0, used for first return value - // ca1, used for second return value + /* + * Atlas: + * mtdc: (still) pointer to TrustedStack + * sp: target compartment stack (restored from TrustedStack frame) + * ra, gp: target compartment context (from switcher spill frame) + * s0, s1: target callee-save registers (from switcher spill frame) + * a0, a1: return values + */ + // Zero all registers apart from ra, sp, gp, s0, s1, and return args. 
zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 li a0, -ECOMPARTMENTFAIL li a1, 0 cret - -// If we have run out of trusted stack, then just restore the caller's state -// and return an error value. -.Lout_of_trusted_stack: - // Restore the spilled values - clc cs0, SPILL_SLOT_cs0(csp) - clc cs1, SPILL_SLOT_cs1(csp) - clc cra, SPILL_SLOT_pcc(csp) - clc cgp, SPILL_SLOT_cgp(csp) - cincoffset csp, csp, SPILL_SLOT_SIZE - // Set the return registers - li a0, -ENOTENOUGHTRUSTEDSTACK - li a1, 0 - // Zero everything else - zeroAllRegistersExcept ra, sp, gp, s0, s1, a0, a1 - cret - -// If we have a possibly recoverable error, see if we have a useful error -// handler. At this point, the register state will have been saved in the -// register-save area and so we just need to set up the environment. -// -// On entry to this block, csp contains the trusted stack pointer, all other -// registers are undefined. -// -// The handler will have this type signature: -// enum ErrorRecoveryBehaviour compartment_error_handler(struct ErrorState *frame, -// size_t mcause, -// size_t mtval); +/** + * If we have a possibly recoverable error, see if we have a useful error + * handler. At this point, the register state will have been saved in the + * register-save area and so we just need to set up the environment. + * The handler will have this type signature: + * + * enum ErrorRecoveryBehaviour + * compartment_error_handler(struct ErrorState *frame, + * size_t mcause, + * size_t mtval); + */ .Lhandle_error: - // We're now out of the exception path, so make sure that mtdc contains - // the trusted stack pointer. + /* + * FROM: .Lexception_might_handle + * IRQ: deferred + * LIVE IN: sp + * + * Atlas: + * sp: pointer to TrustedStack + */ + /* + * We're now out of the exception path, so make sure that mtdc contains + * the trusted stack pointer. + */ cspecialw mtdc, csp - // Store an error value in return registers, which will be passed to the - // caller on unwind. 
They are currently undefined, if we leave this path - // for a forced unwind then we will return whatever is in ca0 and ca1 to - // the caller so must ensure that we don't leak anything. - li a0, -1 - li a1, 0 - - // We want to make sure we can't leak any switcher state into error - // handlers, so if we're faulting in the switcher then we should force - // unwind. We never change the base of PCC in the switcher, so we can - // check for this case by ensuring that the spilled mepcc and our current - // pcc have the same base. + /* + * Atlas: + * mtdc: pointer to TrustedStack + * sp: (still) pointer to TrustedStack + */ + +.Lhandle_error_switcher_pcc: + // FROM: above + /* + * We want to make sure we can't leak any switcher state into error + * handlers, so if we're faulting in the switcher then we should force + * unwind. We never change the base of PCC in the switcher, so we can + * check for this case by ensuring that the spilled mepcc and our current + * pcc have the same base. + */ auipcc ct0, 0 clc ct1, TrustedStack_offset_mepcc(csp) cgetbase t0, ct0 - cgetbase t1, ct1 - beq t0, t1, .Lforce_unwind + cgetbase tp, ct1 + bne t0, tp, .Lhandle_error_not_switcher + // Atlas: t1: a copy of mepcc + + /* + * Some switcher instructions' traps are handled specially, by looking at + * the offset of mepcc. Otherwise, we're off to a force unwind. 
+ */ +.Lhandle_error_in_switcher: + auipcc ctp, %cheriot_compartment_hi(.Lswitcher_entry_first_spill) + cincoffset ctp, ctp, %cheriot_compartment_lo_i(.Lhandle_error_in_switcher) + // LIVE OUT: mtdc + bne t1, tp, .Lforce_unwind + li a0, -ENOTENOUGHSTACK + li a1, 0 + // LIVE OUT: sp, a0, a1 + j .Linstall_return_context +.Lhandle_error_not_switcher: // Load the interrupted thread's stack pointer into ct0 clc ct0, TrustedStack_offset_csp(csp) - // See if we can find a handler: + // Atlas: t0: interrupted thread's stack pointer + + /* + * If we have already unwound so far that the TrustedStack::frameoffset is + * pointing at TrustedStack::frames[0] -- that is, if the stack has no + * active frames on it -- then just go back to the context we came from, + * effectively parking this thread in a (slow) infinite loop. + */ clhu tp, TrustedStack_offset_frameoffset(csp) li t1, TrustedStack_offset_frames + // LIVE OUT: sp beq tp, t1, .Lreset_mepcc_and_install_context addi tp, tp, -TrustedStackFrame_size - - // ctp points to the current available trusted stack frame. cincoffset ctp, csp, tp + // Atlas: tp: pointer to current TrustedStackFrame + // a0 indicates whether we're calling a stackless error handler (0: stack, // 1: stackless) li a0, 0 // Allocate space for the register save frame on the stack. cincoffset ct0, ct0, -(16*8) - // WARNING: ENCODING SPECIFIC. - // The following depends on the fact that before-the-start values are not - // representable in the CHERIoT encoding and so will clear the tag. If - // this property changes then this will need to be replaced by a check that - // against the base of the stack. Note that this check can't be a simple - // cgetbase on ct0, because moving the address below the base sufficiently - // far that it's out of *representable* bounds will move the reported base - // value (base is a displacement from the address). 
+.Lhandle_error_stack_oob: + /* + * FROM: above + * LIVE IN: sp, tp, t0, a0 + */ + /* + * WARNING: ENCODING SPECIFIC. + * + * The following depends on the fact that before-the-start values are not + * representable in the CHERIoT encoding and so will clear the tag. If + * this property changes then this will need to be replaced by a check that + * against the base of the stack. Note that this check can't be a simple + * cgetbase on ct0, because moving the address below the base sufficiently + * far that it's out of *representable* bounds will move the reported base + * value (base is a displacement from the address). + */ cgettag t1, ct0 - // If there isn't enough space on the stack, see if there's a stackless - // handler. + /* + * If there isn't enough space on the stack, see if there's a stackless + * handler. + */ + // LIVE OUT: sp, tp, t0 beqz t1, .Ltry_stackless_handler clc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Set the export table pointer to point to the *start* of the export - // table. It will currently point to the entry point that was raised. - // TODO: We might want to pass this to the error handler, it might be - // useful for providing per-entry-point error results. + // Atlas: t1: pointer to callee's invoked export table entry + /* + * Set the export table pointer to point to the *start* of the export + * table. It will currently point to the entry point that was raised. + * + * TODO: We might want to pass this to the error handler, it might be + * useful for providing per-entry-point error results. + */ cgetbase s0, ct1 csetaddr ct1, ct1, s0 clhu s0, ExportTable_offset_errorHandler(ct1) - // A value of 0xffff indicates no error handler - // If we found one, use it, otherwise fall through and try to find a - // stackless handler. + /* + * A value of 0xffff indicates no error handler. If we found one, use it, + * otherwise fall through and try to find a stackless handler. 
+ */ li s1, 0xffff +.Lhandle_error_try_stackful: + // FROM: above + // LIVE OUT: sp, tp, t0, t1, s0, a0 bne s0, s1, .Lhandler_found .Ltry_stackless_handler: + /* + * FROM: above, .Lhandle_error_stack_oob + * LIVE IN: sp, tp, t0 + * Atlas: + * sp: pointer to TrustedStack + * tp: pointer to current TrustedStackFrame + * t0: interrupted thread's stack pointer + */ + clc ct1, TrustedStackFrame_offset_calleeExportTable(ctp) - // Set the export table pointer to point to the *start* of the export - // table. It will currently point to the entry point that was raised. + /* + * Set the export table pointer to point to the *start* of the export + * table. It will currently point to the entry point that was raised. + */ cgetbase s0, ct1 csetaddr ct1, ct1, s0 + // Atlas: t1: pointer to callee's export table clhu s0, ExportTable_offset_errorHandlerStackless(ct1) - // A value of 0xffff indicates no error handler - // Give up if there is no error handler for this compartment. + /* + * A value of 0xffff indicates no error handler. Give up if there is no + * error handler for this compartment, having already tried any stackful + * handler. + */ li s1, 0xffff + // LIVE OUT: mtdc beq s0, s1, .Lforce_unwind - // The stack may have had its tag cleared at this point, so for stackless - // handlers we need to restore the on-entry stack. - // Get the previous trusted stack frame - - // Load the caller's csp - clc ca0, TrustedStackFrame_offset_csp(ctp) - - // If this is the top stack frame, then the csp field is the value on - // entry. If it's any other frame then we need to go to the previous one + /* + * The stack may have had its tag cleared at this point, so for stackless + * handlers we need to restore the on-entry stack. + */ + clc ct0, TrustedStackFrame_offset_csp(ctp) + // Atlas: t0: target invocation's stack pointer, as of invocation start + + /* + * If this is the top (initial) stack frame, then the csp field is the value + * on entry and it is safe to use directly. 
Otherwise, we reconstruct the + * stack as it would have been on compartment invocation. + */ cincoffset cs1, csp, TrustedStack_offset_frames - beq s1, t1, .Lrecovered_stack - - // The address of the stack pointer will point to the bottom of the - // caller's save area, so we set the bounds to be the base up to the - // current address. - cgetaddr a1, ca0 - cgetbase a2, ca0 + beq s1, tp, .Lrecovered_stack + +.Lhandler_stack_bounding: + // FROM: above + /* + * The address of the stack pointer will point to the bottom of the caller's + * save area created by .Lswitcher_entry_first_spill and following + * instructions, so we set the bounds to be the base up to the current + * address, giving the handler access to the entirety of this invocation's + * activation frame (except the caller save registers we spilled). + */ + cgetaddr a1, ct0 + cgetbase a2, ct0 sub a1, a1, a2 - csetaddr ca0, ca0, a2 - // The code that installs the context expects csp to be in ct0 - csetboundsexact ct0, ca0, a1 + csetaddr ct0, ct0, a2 + // The code that installs the context expects the target stack to be in ct0 + csetboundsexact ct0, ct0, a1 .Lrecovered_stack: li a0, 1 .Lhandler_found: + /* + * FROM: above, .Lhandle_error_try_stackful + * LIVE IN: sp, tp, t0, t1, s0, a0 + * + * Atlas: + * sp: pointer to TrustedStack + * tp: pointer to current TrustedStackFrame + * t0: pointer to the untrusted stack to use on invocation. Either below + * all activations, in the stackful handler case, or the entire + * invocation's stack (below the spill frame created by + * .Lswitcher_entry_first_spill and following instructions). + * t1: pointer to callee's export table + * s0: offset from compartment PCC base to handler + * a0: stackful (0) or stackless (1) indicator + */ // Increment the handler invocation count. clhu s1, TrustedStackFrame_offset_errorHandlerCount(ctp) addi s1, s1, 1 csh s1, TrustedStackFrame_offset_errorHandlerCount(ctp) - // If we are in a double fault, unwind now. 
The low bit should be 1 while - // we are handling a fault. + /* + * The low bit should be 1 while we are handling a fault. If we are in a + * double fault (that is, the value we just wrote back has its low bit 0), + * unwind now. + */ +.Lhandle_error_test_double_fault: + // FROM: above andi ra, s1, 1 + // LIVE OUT: mtdc beqz ra, .Lforce_unwind - // If we have reached some arbitrary limit on the number of faults in a - // singe compartment calls, give up now. - // TODO: Make this a number based on something sensible, possibly something - // set per entry point. Some compartments (especially top-level ones) - // should be allowed to fault an unbounded number of times. + + /* + * If we have reached some arbitrary limit on the number of faults in a + * singe compartment calls, give up now. + * + * TODO: Make this a number based on something sensible, possibly something + * set per entry point. Some compartments (especially top-level ones) + * should be allowed to fault an unbounded number of times. + */ +.Lhandle_error_test_too_many: + // FROM: above li ra, MAX_FAULTS_PER_COMPARTMENT_CALL + // LIVE OUT: mtdc bgtu s1, ra, .Lforce_unwind // Load the pristine pcc and cgp for the invoked compartment. clc cra, ExportTable_offset_pcc(ct1) clc cgp, ExportTable_offset_cgp(ct1) - // Set the jump target to the error handler entry point - // This may result in something out-of-bounds if the compartment has a - // malicious value for their error handler (hopefully caught at link or - // load time), but if it does then we will double-fault and force unwind. + /* + * Set the jump target to the error handler entry point. 
This may result in + * something out-of-bounds if the compartment has a malicious value for + * their error handler (hopefully caught at link or load time), but if it + * does then we will fault when attempting the cjalr below and force unwind + * (either because the cjalr itself will raise a fault, because ra is + * untagged, or because the resulting PCC is out of bounds and instruction + * fetch fails; either case results in a forced unwind, albeit by slightly + * different paths, with .Lhandle_error_switcher_pcc relevant for the former + * and .Lhandle_error_test_double_fault for the latter. + */ cgetbase s1, cra csetaddr cra, cra, s1 cincoffset cra, cra, s0 - // If we're in an error handler with a stack, set up the stack, otherwise - // we just need to set up argument registers. + /* + * If we're in an error handler with a stack, set up the stack, otherwise + * we just need to set up argument registers. + */ +.Lhandle_error_test_stackful: + // FROM: above beqz a0, .Lset_up_stack_handler + +.Lset_up_stack_handler_stackless: + // FROM: above clw a0, TrustedStack_offset_mcause(csp) csrr a1, mtval li a2, 0 cmove csp, ct0 + // Atlas: sp: taget compartment invocation stack pointer j .Linvoke_error_handler .Lset_up_stack_handler: - // Set up the on-stack context for the callee - clc cs1, 0(csp) + /* + * FROM: .Lhandle_error_test_stackful + * LIVE IN: ra, sp, gp + * + * Atlas: + * ra: handler entrypoint (with bounds of compartment's .text) + * sp: pointer to TrustedStack + * gp: target compartment cgp + */ + /* + * Set up the on-stack context, a compartment.h:/struct ErrorState value, + * which has the same layout at a TrustedStack spill frame. + * + * These begin with a PCC. To ensure that handlers do not have access to + * values (especially, capabilities) reachable through the trapping PCC, + * we clear the tag. Handlers of course retain access to values reachable + * through their own PCC and CGP. 
+ */ + clc cs1, TrustedStack_offset_mepcc(csp) ccleartag cs1, cs1 - csc cs1, 0(ct0) - // Source for context copy. - cincoffset ca2, csp, TrustedStack_offset_cra - // Destination for context copy - cincoffset ca3, ct0, TrustedStack_offset_cra - copyContext ca3, ca2, cs1, a4 + csc cs1, TrustedStack_offset_mepcc(ct0) + // Now copy the 15 GPRs from the trusted stack (sp) + cincoffset ca2, csp, TrustedStack_offset_cra // source + cincoffset ca3, ct0, TrustedStack_offset_cra // destination
Why did you change the comment style here? We don't put comments on the end of lines anywhere else in the switcher, do we?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
davidchisnall
@@ -713,77 +1181,147 @@ exception_entry_asm: cmove csp, ca0 .Linvoke_error_handler: - // Clear all registers except: - // cra is set by cjalr. csp and cgp are needed for the called compartment. - // ca0, used for the register state - // ca1, used for mcause - // ca2, used for mtval + /* + * FROM: above, .Lset_up_stack_handler_stackless + * LIVE IN: ra, sp, gp, a0, a1, a2 + * + * Atlas: + * ra: handler entrypoint (with bounds of compartment's .text) + * gp: target compartment cgp + * sp: target compartment invocation stack pointer + * a0, a1, a2: arguments to handler. + */ + /* + * For a stackful handler, the arguments are: + * - a0: equal to the invocation stack (sp), with a register spill frame + * here and above (the stack grows down!) + * - a1: mcause + * - a2: mtval + * + * While for stackless, the arguments are: + * - a0: mcause + * - a1: mtval + * - a2: zero + */ + + // Clear all other registers and invoke the handler zeroAllRegistersExcept ra, sp, gp, a0, a1, a2 - // Call the handler. cjalr cra +.Lhandler_return: + /* + * FROM: above, malice + * LIVE IN: mtdc, a0, sp + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * a0: handler return value + * sp: target compartment invocation stack pointer + */ + /* + * The return sentry given to the handler as part of that cjalr could be + * captured in that compartment or any of its callers. We cannot assume + * well-bracketed control flow. However, the requirements of the next block + * of code are minimal: mtdc must be a TrustedStack pointer, and we may try + * to dereference the provided sp, but we are prepared for that to trap (and + * induce forced-unwinding). + */ + /* + * Return values are compartment.h's enum ErrorRecoveryBehaviour : + * - InstallContext (0) + * - ForceUnwind (1) + * Other values are invalid and so we should do a forced unwind anyway. + */ + // LIVE OUT: mtdc + bnez a0, .Lforce_unwind + + /* + * We have been asked to install the new register context and resume. 
We do + * this by copying the register frame over the save area and entering the + * exception resume path. This may fault, but if it does then we will + * detect it as a double fault and forcibly unwind. + * + * The state of the target stack (sp) is expected to be common across both + * stackful and stackless handlers in the case of an InstallContext return. + * Above, in .Lset_up_stack_handler, we arranged for sp to point to a + * register spill frame (also passed in a0 for convenience from C). + * Stackless handlers are expected to arrange for sp to point to a register + * spill area before returning; compartments availing themselves of + * stackless handlers must also manage reserving space for such. + */ - // Move the return value to a register that will be cleared in a forced - // unwind and zero the return registers. - move s0, a0 - // Store an error value in return registers, which will be passed to the - // caller on unwind. - li a0, -1 - li a1, 0 - // Return values are 0 for install context, 1 for forced unwind. Anything - // that is not either of these is invalid and so we should do a forced - // unwind anyway. - bne s0, zero, .Lforce_unwind - - // We have been asked to install the new register context and resume. - // We do this by copying the register frame over the save area and entering - // the exception resume path. This may fault, but if it does then we will - // detect it as a double fault and forcibly unwind. - - // Load the trusted stack pointer to ct1 cspecialr ct1, mtdc + // Atlas: t1: pointer to TrustedStack #ifdef CONFIG_MSHWM
Maybe not this PR, but I think we can assume MSHWM now. We haven't tested without it for ages and I expect everyone to use it.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -119,33 +159,12 @@ switcher_scheduler_entry_csp: forall reloadOne, \reg1, \regs .endm -/** - * Verify the compartment stack is valid, with the expected permissions and - * unsealed. - * This macro assumes t2 and tp are available to use. - */ -.macro check_compartment_stack_integrity reg - // Check that the caller's CSP is a tagged, unsealed capability (with at - // least load permission - we'll check the other permissions properly - // later) by loading a byte. If this doesn't work, we'll fall off this - // path into the exception handler and force unwind. - clb t2, 0(\reg) - // make sure the caller's CSP has the expected permissions - cgetperm t2, \reg - li tp, COMPARTMENT_STACK_PERMISSIONS - bne tp, t2, .Lforce_unwind - // Check that the top and base are 16-byte aligned - cgetbase t2, csp - or t2, t2, sp - andi t2, t2, 0xf - bnez t2, .Lforce_unwind -.endm - /** * Zero the stack. The three operands are the base address, the top address, * and a scratch register to use. The base must be a capability but it must * be provided without the c prefix because it is used as both a capability - * and integer register. All three registers are clobbered. + * and integer register. All three registers are clobbered, but base and top + * are still capabilities afterwards.
How is top still a cap, addi instruction is used. c\base is still a cap?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice
How can this be invoked except through a cross-compartment call?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call
Line 190: What are hazard pointers? (Can't comment on a non changed line on git)
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + *
Good to state all registers except tp, t2 (which are both dead as explained below) and zero (which is unnecessary)
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture)
For tail call optimization?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall)
Maybe link to the documentation in the ISA doc also (Section 5.4)?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would.
Not sure what you mean here. Yes, if the lowest address is less than the base, this is guaranteed. But there could be other bounds check violations (higher than top, etc), no?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is
Stack sizes are guaranteed to be between the 2^12 and 2^13 range (i.e. between 4089 and 8176) ?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the stores + * above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp
The LIVE OUT doesn't capture, for instance ct2 which is still alive and used. Not sure if LIVE OUT is helping. Just having alive, dead-again and dead-still list would suffice.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the stores + * above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... + * - s0 for the stack base address + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /*
Simpler to state as follows: // s0 = old csp's address // csp = {base = old csp's base, length = offset of old csp's top from base, address = old csp's address, tag = old csp's tag, permission = old csp's permission}
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the stores + * above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... + * - s0 for the stack base address + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames
What does "stack boundary between ..." mean?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the stores + * above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... + * - s0 for the stack base address + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0
new t2 is "dead-again", so referring to it is not very helpful. Better to use the old values instead of a "dead-again" value.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the stores + * above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... 
+ * - s0 for the stack base address + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: pointer to the freshly populated TrustedStackFrame (still) + * t1: sealed export table entry for the target callee (still) + * a0, a1, a2, a3, a4, a5, t0: call argument values / to be zeroed (still) + * t2, s1: dead (again) + * ra, gp: dead (still) + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge gp, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. +//.Lswitch_shwm_skip_zero: + bge gp, sp, .Lswitch_after_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. + */
What happens if the high water mark is below the base of the stack?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +203,275 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). 
+ * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. - check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. 
+ /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are 16-byte aligned. We have already checked that it is + * tagged and unsealed and 8-byte aligned by virtue of surviving the stores + * above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... 
+ * - s0 for the stack base address + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames + * sp: pointer to stack, with bounds as t2, cursor at boundary in s0 + * tp: pointer to the freshly populated TrustedStackFrame (still) + * t1: sealed export table entry for the target callee (still) + * a0, a1, a2, a3, a4, a5, t0: call argument values / to be zeroed (still) + * t2, s1: dead (again) + * ra, gp: dead (still) + */ #ifdef CONFIG_MSHWM // Read the stack high water mark (which is 16-byte aligned) csrr gp, CSR_MSHWM // Skip zeroing if high water mark >= stack pointer - bge gp, sp, .Lafter_zero - // Use stack high water mark as base address for zeroing. If this faults - // then it will trigger a force unwind. This can happen only if the caller - // is doing something bad. +//.Lswitch_shwm_skip_zero: + bge gp, sp, .Lswitch_after_zero + /* + * Use stack high water mark as base address for zeroing. If this faults + * then it will trigger a force unwind. This can happen only if the caller + * is doing something bad. + */ csetaddr ct2, csp, gp #endif - zero_stack t2, s0, gp -.Lafter_zero: + zero_stack /* base = */ t2, /* top = */ s0, /* scratch = */ gp
Finished reviewing till this line.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +206,285 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * (that is, all registers except tp and t2) + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall and, more + * generally, the ABI chapter of the CHERIoT ISA document, + * https://cheriot.org/cheriot-sail/cheriot-architecture.pdf) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. 
If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). + * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would access + * below the base (lowest address) of the stack capability. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. 
- check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are at least 16-byte aligned. We have already checked + * that it is tagged and unsealed and at least 8-byte aligned by virtue of + * surviving the stores above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... + * - s0 for the current untrusted stack base address (the lowest address of + * the register spill we created at .Lswitch_entry_first_spill) + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1 csetaddr csp, ct2, s0 + /* + * Atlas: + * s0: address of stack boundary between caller and callee frames, that is, + * the lowest address of the register spill from + * .Lswitch_entry_first_spill) + * sp: pointer to stack, with its limit and address set to the address in + * s0. The base and permissions have not been altered from sp at
Easier to specify what the length is
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
vmurali
@@ -184,99 +206,285 @@ switcher_scheduler_entry_csp: .type __Z26compartment_switcher_entryz,@function __Z26compartment_switcher_entryz: /* - * Spill caller-save registers carefully. If we find ourselves unable to do - * so, we'll return an error to the caller (via the exception path; see - * .Lhandle_error_in_switcher). The error handling path assumes that the - * first spill is to the lowest address and guaranteed to trap if any would. + * FROM: cross-call + * FROM: malice + * IRQ: deferred + * LIVE IN: mtdc, ra, sp, gp, s0, s1, t0, t1, a0, a1, a2, a3, a4, a5 + * (that is, all registers except tp and t2) + * + * Atlas: + * mtdc: pointer to this thread's TrustedStack + * (may be 0 from buggy/malicious scheduler thread) + * ra: caller return address + * (at the moment, this is ensured because we enter via an + * IRQ-disabling forward sentry, which requires ra as the destination + * register of the cjalr the caller used, but we are not relying on + * this property, and we hope to relax the switcher's IRQ posture) + * sp: nominally, caller's stack pointer; will check integrity below + * gp: caller state, to be spilled, value unused in switcher + * s0, s1: caller state, to be spilled, value unused in switcher + * t0: possible caller argument to callee, passed or zered in switcher + * (specifically, this is the pointer to arguments beyond a0-a5 and/or + * variadic arguments) + * t1: sealed export table entry for the target callee + * (see LLVM's RISCVExpandPseudo::expandCompartmentCall and, more + * generally, the ABI chapter of the CHERIoT ISA document, + * https://cheriot.org/cheriot-sail/cheriot-architecture.pdf) + * a0, a1, a2, a3, a4, a5: possible caller arguments to callee, passed or + * zeroed in switcher. + * tp, t2: dead + */ + /* + * By virtue of making a call, the caller is indicating that all caller-save + * registers are dead. Because we are crossing a trust boundary, the + * switcher must spill callee-save registers. 
If we find ourselves unable + * to do so for "plausibly accidental" reasons, we'll return an error to the + * caller (via the exception path; see .Lhandle_error_in_switcher). + * Specifically, the first spill here is to the lowest address and so + * guaranteed to raise a bounds fault if any of the stores here would access + * below the base (lowest address) of the stack capability. + * + * Certain other kinds of less plausibly accidental malice (for example, an + * untagged or sealed or SD-permission-less capability in sp) will also be + * caught by this first spill. In some sense we should forcibly unwind the + * caller, but it's acceptable, in the sense that no would-be-callee can be + * harmed, to just return an error instead. + * + * Yet other kinds of less plausibly accidental malice can survive the first + * spill. For example, consider a MC-permission-less capability in sp and a + * non-capability value in s0. While the first spill will not trap, these + * forms of malice will certainly be detected in a few instructions, when we + * scrutinize sp in detail. They might (or might not) cause an intervening + * (specifically, spill) instruction to trap. Either way will result in us + * ending up in .Lcommon_force_unwind, either directly or via the exception + * handler. + * + * At entry, the register file is safe to expose to the caller, and so if + * and when we take the "just return an error" option, no changes, beyond + * populating the error return values in a0 and a1, are required. */ cincoffset ct2, csp, -SPILL_SLOT_SIZE -.Lswitcher_entry_first_spill: +.Lswitch_entry_first_spill: csc cs0, SPILL_SLOT_cs0(ct2) csc cs1, SPILL_SLOT_cs1(ct2) csc cgp, SPILL_SLOT_cgp(ct2) csc cra, SPILL_SLOT_pcc(ct2) cmove csp, ct2 - // before we access any privileged state, we can verify the - // compartment's csp is valid. If not, force unwind. - // Note that this check is purely to protect the callee, not the switcher - // itself. 
- check_compartment_stack_integrity csp - // The caller should back up all callee saved registers. + /* + * Atlas update: + * ra, gp, s0, s1: dead (presently, redundant caller values) + * t2: dead (presently, a copy of csp) + */ + + /* + * Before we access any privileged state, we can verify the compartment's + * csp is valid. If not, force unwind. Note that these checks are purely to + * protect the callee, not the switcher itself, which can always bail and + * forcibly unwind the caller. + * + * Make sure the caller's CSP has the expected permissions (including that + * it is a stack pointer, by virtue of being local and bearing SL) and that + * its top and base are at least 16-byte aligned. We have already checked + * that it is tagged and unsealed and at least 8-byte aligned by virtue of + * surviving the stores above. + * + * TODO for formal verification: it should be the case that after these + * tests and the size checks below, no csp-authorized instruction in the + * switcher can fault. + */ +//.Lswitch_csp_check: + cgetperm t2, csp + li tp, COMPARTMENT_STACK_PERMISSIONS + bne tp, t2, .Lcommon_force_unwind + cgetbase t2, csp + or t2, t2, sp + andi t2, t2, 0xf + bnez t2, .Lcommon_force_unwind + /* + * Atlas update: + * t2, tp: dead (again) + * sp: the caller's untrusted stack pointer, now validated and pointing at + * the callee-save register spill area we made above + */ + // mtdc should always have an offset of 0. cspecialr ct2, mtdc -#ifndef NDEBUG - // XXX: This line is useless, only for mtdc to show up in debugging. - cmove ct2, ct2 -#endif - clear_hazard_slots ct2, ctp + // Atlas update: t2: a pointer to this thread's TrustedStack structure + /* + * This is our first access via mtdc, and so it might trap, if the scheduler + * tries a cross-compartment call. That will be a fairly short trip to an + * infinite loop (see commentary in exception_entry_asm). 
+ */ + clear_hazard_slots /* trusted stack = */ ct2, /* scratch = */ ctp + // Atlas update: tp: dead (again) - // make sure the trusted stack is still in bounds +//.Lswitch_trusted_stack_push: + /* + * TrustedStack::frames[] is a flexible array member at the end of the + * structure, and the stack of frames it represents grows *upwards* (with + * [0] the initial activation, [1] the first cross-compartment call, and so + * on). Thus, if the frame offset points "one past the end" (or futher + * out), we have no more frames available, so off we go to + * .Lswitch_trusted_stack_exhausted . + */ clhu tp, TrustedStack_offset_frameoffset(ct2) - cgetlen t2, ct2 - bgeu tp, t2, .Lout_of_trusted_stack - // we are past the stacks checks. Reload ct2; tp is still as it was - cspecialr ct2, mtdc - // ctp points to the current available trusted stack frame. + cgetlen s0, ct2 + /* + * Atlas update: + * s0: scalar length of the TrustedStack structure + * tp: scalar offset of the next available TrustedStack::frames[] element + */ + // LIVE OUT: mtdc, sp + bgeu tp, s0, .Lswitch_trusted_stack_exhausted + // Atlas update: s0: dead + // we are past the stacks checks. cincoffset ctp, ct2, tp + // Atlas update: tp: pointer to the next available TrustedStackFrame + /* + * Populate that stack frame by... + * 1. spilling the caller's stack pointer, as modified by the spills at the + * start of this function. + */ csc csp, TrustedStackFrame_offset_csp(ctp) - // We have just entered this call, so no faults triggered during this call - // yet. + /* + * 2. zeroing the number of error handler invocations (we have just entered + * this call, so no faults triggered during this call yet). + */ csh zero, TrustedStackFrame_offset_errorHandlerCount(ctp) - // For now, store a null export entry so that we don't ever try to pass - // switcher state to an error handler. + /* + * 3. For now, store a null export entry. 
This is largely cosmetic; we will + * not attempt to access this value before it is set to the real export + * table entry below. Should we trap, the logic at + * .Lhandle_error_switcher_pcc will cause us to force unwind, popping + * this frame before any subsequent action. + * + * TODO for formal verification: prove that this store is dead and can + * be eliminated. + */ csc cnull, TrustedStackFrame_offset_calleeExportTable(ctp) + /* + * Update the frame offset, using s1 to hold a scratch scalar. Any fault + * before this point (wrong target cap, unaligned stack, etc.) is seen as a + * fault in the caller. After writing the new TrustedSstack::frameoffset, + * any fault is seen as a callee fault. + */ clhu s1, TrustedStack_offset_frameoffset(ct2) addi s1, s1, TrustedStackFrame_size - // Update the frame offset. - // Any fault before this point (wrong target cap, unaligned stack, etc.) is - // seen as a fault in the caller. From this point after writing the new - // tstack offset, any fault is seen as a callee fault. With a null export - // table entry on the trusted stack, a fault here will cause a forced - // unwind until we set the correct one. csh s1, TrustedStack_offset_frameoffset(ct2) - // Chop off the stack. + + /* + * Chop off the stack, using... + * - s0 for the current untrusted stack base address (the lowest address of + * the register spill we created at .Lswitch_entry_first_spill) + * - s1 for the length of the stack suffix to which the callee is entitled + */ +//.Lswitch_stack_chop: cgetaddr s0, csp cgetbase s1, csp csetaddr csp, csp, s1 sub s1, s0, s1 csetboundsexact ct2, csp, s1
Hmm, what's the guarantee that this wouldn't fail? Is there a better way to carve things out so that we dont fail here instead of trying to use csetboundsexact and failing? Perhaps get the cap with the same base but smaller length?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
rmn30
@@ -27,6 +27,21 @@ #define SPILL_SLOT_pcc 24 #define SPILL_SLOT_SIZE 32 +/* + * RISC-V custom-use mcause values for RTOS internal use. + * See also include/priv/riscv.h, which defines the same names and values in C
maybe we should have a c header file with the macro definitions and include it in both places?
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
rmn30
@@ -298,98 +512,190 @@ __Z26compartment_switcher_entryz: // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas update: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas update: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ +//.Lswitch_callee_load: + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. + // Atlas update: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas update: ra: target function entrypoint (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. -.Lload_zero_arguments_start: - auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) - cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. 
+ */ +.Lswitch_load_zero_arguments_start: + auipcc cs0, %cheriot_compartment_hi(.Lswitch_zero_arguments_start) + cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lswitch_load_zero_arguments_start) + // Atlas update: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 -.Lzero_arguments_start: +.Lswitch_zero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags - bnez t1, .Lskip_interrupt_disable + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 + bnez t1, .Lswitch_skip_interrupt_disable csrsi mstatus, 0x8
This instruction actually enable interrupts, right? In that case the label should be called `skip_interrupts_enable`.
cheriot-rtos
github_2023
others
320
CHERIoT-Platform
rmn30
@@ -298,98 +512,190 @@ __Z26compartment_switcher_entryz: // Get the flags field into tp clbu tp, ExportEntry_offset_flags(ct1) + // Atlas update: tp: callee entry flags field + + // All ExportEntry state has been consulted; move to ExportTable header cgetbase s1, ct1 csetaddr ct1, ct1, s1 - // Load the target CGP + /* + * Atlas update: + * t1: pointer to the callee compartment ExportTable structure. Bounds + * still inclusive of ExportEntry array, but that will not be accessed. + */ +//.Lswitch_callee_load: + // At this point we begin loading callee compartment state. clc cgp, ExportTable_offset_cgp(ct1) - // Load the target PCC and point to the function. + // Atlas update: gp: target compartment CGP clc cra, ExportTable_offset_pcc(ct1) cincoffset cra, cra, s0 - // Get the number of registers to zero in t2 - andi t2, tp, 0x7 - // Get the interrupt-disable bit in t1 - andi t1, tp, 0x10 + // Atlas update: ra: target function entrypoint (pcc base + offset from s0) + // Zero any unused argument registers - // The low 3 bits of the flags field contain the number of arguments to - // pass. We create a small sled that zeroes them and jump into the middle - // of it at an offset defined by the number of registers that the export - // entry told us to pass. -.Lload_zero_arguments_start: - auipcc cs0, %cheriot_compartment_hi(.Lzero_arguments_start) - cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lload_zero_arguments_start) - // Change from the number of registers to pass into the number of 2-byte - // instructions to skip. + /* + * The low 3 bits of the flags field (tp) contain the number of argument + * registers to pass. We create a small sled that zeroes them in the order + * they are used as argument registers, and we jump into the middle of it at + * an offset defined by that value, preserving the prefix of the sequence. 
+ */ +.Lswitch_load_zero_arguments_start: + auipcc cs0, %cheriot_compartment_hi(.Lswitch_zero_arguments_start) + cincoffset cs0, cs0, %cheriot_compartment_lo_i(.Lswitch_load_zero_arguments_start) + // Atlas update: s0: .Lzero_arguments_start + andi t2, tp, 0x7 // loader/types.h's ExportEntry::flags + /* + * Change from the number of registers to pass into the number of 2-byte + * instructions to skip. + */ sll t2, t2, 1 - // Offset the jump target by the number of registers that we should be - // passing. + // Offset the jump target by the number of instructions to skip cincoffset cs0, cs0, t2 // Jump into the sled. cjr cs0 -.Lzero_arguments_start: +.Lswitch_zero_arguments_start: zeroRegisters a0, a1, a2, a3, a4, a5, t0 - // Enable interrupts of the interrupt-disable bit is not set in flags - bnez t1, .Lskip_interrupt_disable + + /* + * Enable interrupts if the interrupt-disable bit is not set in flags. See + * loader/types.h's InterruptStatus and ExportEntry::InterruptStatusMask + */ + andi t1, tp, 0x10 + bnez t1, .Lswitch_skip_interrupt_disable csrsi mstatus, 0x8 -.Lskip_interrupt_disable: - // Registers passed to the callee are: - // cra (c1), csp (c2), and cgp (c3) are passed unconditionally. - // ca0-ca5 (c10-c15) and ct0 (c5) are either passed as arguments or cleared - // above. This should add up to 10 registers, with the remaining 5 being - // cleared now: +.Lswitch_skip_interrupt_disable: + /* + * Atlas: + * ra: (still) target function entrypoint + * sp: (still) pointer to stack, below compartment invocation local storage + * gp: (still) target compartment CGP + * a0, a1, a2, a3, a4, a5, t0: arguments or zeroed, as above + * tp, t1, t2, s0, s1: dead + */ + /* + * Up to 10 registers are carrying state for the callee or are properly + * zeroed. Clear the remaining 5 now. 
+ */ +//.Lswitch_caller_dead_zeros: zeroRegisters tp, t1, t2, s0, s1 +//.Lswitch_callee_call: + /* + * "cjalr cra" simultaneously moves the live-in ra value into the *next* + * program counter and the program counter (of the instruction itself) into + * ra (while sealing it to be a backwards-arc sentry). That is, the value + * we have so carefully been keeping in ra is clobbered, but only after it + * becomes the next program counter. + */ + // LIVE OUT: * cjalr cra .globl switcher_skip_compartment_call switcher_skip_compartment_call:
Maybe a more informative name for this label would be `switcher_after_compartment_call`?
cheriot-rtos
github_2023
c
353
CHERIoT-Platform
davidchisnall
@@ -300,3 +302,9 @@ typedef union __divmoddi4(a, b, &r); return r; } + +// Return the product of a and b +[[clang::no_builtin]] arith64_u64 __multi3(arith64_u64 a, arith64_u64 b)
I think this should be s64 not u64, it doesn’t actually affect codegen.
cheriot-rtos
github_2023
cpp
346
CHERIoT-Platform
davidchisnall
@@ -258,6 +258,15 @@ namespace sched size_t mepc, size_t mtval) { +#if !defined(NDEBUG)
We don’t really use these macros anywhere, this should probably be a constexpr if on the scheduler debug flag.
cheriot-rtos
github_2023
cpp
346
CHERIoT-Platform
davidchisnall
@@ -258,6 +258,15 @@ namespace sched size_t mepc, size_t mtval) { +#if !defined(NDEBUG) + /* Ensure that we got here from an IRQ-s deferred context */ + void *returnAddress = __builtin_return_address(0); + Debug::Assert( + __builtin_cheri_type_get(returnAddress) ==
This can use Capability::type, which you can do without a cast if you make the 263 a Capability instead of a bare pointer.
cheriot-rtos
github_2023
cpp
346
CHERIoT-Platform
davidchisnall
@@ -258,6 +258,15 @@ namespace sched size_t mepc, size_t mtval) { + if constexpr (DebugScheduler) + { + /* Ensure that we got here from an IRQ-s deferred context */ + auto returnAddress = Capability{__builtin_return_address(0)};
I would write this as: ```suggestion Capability returnAddress{__builtin_return_address(0)}; ``` But I don't think it matters too much.
cheriot-rtos
github_2023
cpp
343
CHERIoT-Platform
davidchisnall
@@ -92,7 +92,7 @@ struct CheckSize #else # define EXPORT_ASSEMBLY_NAME(name, value) # define EXPORT_ASSEMBLY_EXPRESSION(name, expression, value) -# define EXPORT_ASSEMBLY_OFFSET(structure, field, name, value) +# define EXPORT_ASSEMBLY_OFFSET(structure, field, name)
```suggestion # define EXPORT_ASSEMBLY_OFFSET(structure, field, value) ``` For consistency with the other definition.
cheriot-rtos
github_2023
others
343
CHERIoT-Platform
davidchisnall
@@ -1,16 +1,29 @@ +#include <unwind-assembly.h> + +/** + * A direct re-implementation of unwind.h's cleanup_unwind() as a stackless + * error handler. + * + * If there is no registered CleanupList structure (equivalently, there's no + * CHERIOT_DURING block active at the time of the fault), then this requests + * unwnding out of the compartment. Otherwise, we will longjmp() out to the
```suggestion * unwinding out of the compartment. Otherwise, we will longjmp() out to the ```
cheriot-rtos
github_2023
others
345
CHERIoT-Platform
davidchisnall
@@ -786,6 +789,12 @@ exception_entry_asm: // Call the handler. cjalr cra + /* + * Now that we're back, defer interrupts again before we do anything that + * manipulates the TrustedStack. + */ + csrci mstatus, 0x8 +
Is this necessary? I believe it's fine to take interrupts here and we'll just handle the sentry on the way back.
cheriot-rtos
github_2023
cpp
345
CHERIoT-Platform
rmn30
@@ -10,9 +10,18 @@ int crashes = 0; std::atomic<bool> expectFault; +static void test_irqs_are_enabled() +{ + void *r = __builtin_return_address(0); + TEST(__builtin_cheri_type_get(r) == CheriSealTypeReturnSentryEnabling,
For future reference: we have `TEST_EQUAL` which gives better error messages in this situation.
cheriot-rtos
github_2023
cpp
339
CHERIoT-Platform
rmn30
@@ -0,0 +1,43 @@ +// Copyright CHERIoT Contributors. +// SPDX-License-Identifier: MIT + +#pragma once +#include <assembly-helpers.h> + +/* + * Constant to represent the raw permissions of the compartment CSP. We use + * this in the switcher, to verify the CSP comes from the compartment is exactly
```suggestion * this in the switcher, to verify the permissions of the CSP that comes from the compartment are exactly ```
cheriot-rtos
github_2023
cpp
339
CHERIoT-Platform
rmn30
@@ -21,6 +21,25 @@ struct CheckSize static constexpr bool value = Real == Expected; }; +/** + * Export a macro into assembly named `name` with value `value`. In C++, this + * macro will report an error if the provided value does not equal the constexpr + * evaluation of `expression`. + */ +# define EXPORT_ASSEMBLY_NAME(name, val) \
It took me a minute to work out why we need both of these macros. I guess this one is just the same as `EXPORT_ASSEMBLY_EXPRESSION(name, name, val)` but without a name collision? That makes me think maybe we should use `{}` around the definition below?
cheriot-rtos
github_2023
cpp
339
CHERIoT-Platform
rmn30
@@ -75,6 +75,7 @@ namespace priv constexpr size_t MCAUSE_LOAD_PAGE_FAULT = 13; constexpr size_t MCAUSE_STORE_PAGE_FAULT = 15; constexpr size_t MCAUSE_THREAD_EXIT = 24; + constexpr size_t MCAUSE_THREAD_INTERRUPT = 25; constexpr size_t MCAUSE_CHERI = 28;
This could be a separate PR but these constants are needed in error handlers so we should make them more publicly available in the SDK.