diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml index 5767fb5..5db1a1c 100644 --- a/.github/workflows/verify.yml +++ b/.github/workflows/verify.yml @@ -44,6 +44,12 @@ jobs: run: eval $(opam env) && REFINEDRUST_ROOT=$PWD/verification/refinedrust ./verification/refinedrust/scripts/setup-coq.sh - name: Install RefinedRust type system run: eval $(opam env) && REFINEDRUST_ROOT=$PWD/verification/refinedrust ./verification/refinedrust/scripts/install-typesystem.sh + - name: Install RefinedRust stdlib + run: eval $(opam env) && REFINEDRUST_ROOT=$PWD/verification/refinedrust ./verification/refinedrust/scripts/install-stdlib.sh + - name: Generate stdlib metadata + run: eval $(opam env) && make -C verification/refinedrust/stdlib generate_stdlib + - name: Exclude RefinedRust from dune build + run: echo "(dirs :standard \ generated_code.bak refinedrust)" > verification/dune - name: install build dependencies run: sudo apt -qq -y install wget autoconf automake autotools-dev curl python3 libmpc-dev libmpfr-dev libgmp-dev gawk build-essential bison flex texinfo gperf libtool patchutils bc zlib1g-dev libexpat-dev - name: install OpenSBI dependencies @@ -54,7 +60,5 @@ jobs: run: source "$HOME/.cargo/env" && eval $(opam env) && make setup - name: make devtools run: source "$HOME/.cargo/env" && eval $(opam env) && make devtools - - name: Translate specified files using RefinedRust + - name: Translate specified files using RefinedRust and check proofs run: source "$HOME/.cargo/env" && eval $(opam env) && make verify - - name: Check proofs - run: eval $(opam env) && cd verification && dune build diff --git a/Makefile b/Makefile index 42f02fb..5932462 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,7 @@ tools: setup cp -rf $(TOOLS_SOURCE_DIR)/* $(TOOLS_WORK_DIR) verify: + rm -rf $(ACE_DIR)/security_monitor/verify/ PATH="$(RISCV_GNU_TOOLCHAIN_WORK_DIR)/bin:$(PATH)" ACE_DIR=$(ACE_DIR) LINUX_IMAGE=$(LINUX_IMAGE) CROSS_COMPILE=$(CROSS_COMPILE) PLATFORM_RISCV_XLEN=$(PLATFORM_RISCV_XLEN) PLATFORM_RISCV_ISA=$(PLATFORM_RISCV_ISA) PLATFORM_RISCV_ABI=$(PLATFORM_RISCV_ABI) $(MAKE) -C verification verify clean: diff --git a/security-monitor/Makefile b/security-monitor/Makefile index 3dae15b..5aff0fb 100644 --- a/security-monitor/Makefile +++ b/security-monitor/Makefile @@ -35,8 +35,11 @@ build: opensbi_bindings fmt cp $(SM_WORK_DIR)/$(CHAIN)/release/$(EXEC_NAME) $(SM_WORK_DIR)/ ; \ rm -rf $(OPENSBI_WORK_DIR)/ -refinedrust: build - RUSTFLAGS='$(RUSTFLAGS)' CARGO_TARGET_DIR=$(SM_WORK_DIR) INSTALL_DIR=$(ACE_DIR) $(CARGO) refinedrust $(RELEASE) $(TARGET) --features verbose +refinedrust: opensbi_bindings + echo "Generating RefinedRust translation" ;\ + mkdir -p $(SM_WORK_DIR) ; \ + RUSTFLAGS='$(RUSTFLAGS)' CARGO_TARGET_DIR=$(SM_WORK_DIR) INSTALL_DIR=$(ACE_DIR) $(CARGO) refinedrust $(RELEASE) $(TARGET) --features verbose ; \ + rm -rf $(OPENSBI_WORK_DIR)/ debug: opensbi_bindings echo "Compiling the security monitor in DEBUG mode" ;\ diff --git a/security-monitor/opensbi b/security-monitor/opensbi index a2b255b..92e8aff 160000 --- a/security-monitor/opensbi +++ b/security-monitor/opensbi @@ -1 +1 @@ -Subproject commit a2b255b88918715173942f2c5e1f97ac9e90c877 +Subproject commit 92e8affb31b21ddc5d1193cf754ce644db7b460a diff --git a/security-monitor/rust-crates/pointers_utility/src/lib.rs b/security-monitor/rust-crates/pointers_utility/src/lib.rs index 1009c2e..13f6900 100644 --- a/security-monitor/rust-crates/pointers_utility/src/lib.rs +++ b/security-monitor/rust-crates/pointers_utility/src/lib.rs @@ -5,6 
+5,13 @@ #![no_main] #![feature(pointer_byte_offsets)] +// used for RefinedRust annotations +#![feature(register_tool)] +#![register_tool(rr)] +#![feature(custom_inner_attributes)] +#![rr::coq_prefix("ace_ptr")] + + mod error; use core::mem::size_of; pub use crate::error::PointerError; @@ -27,6 +34,11 @@ pub fn ptr_align(pointer: *mut usize, align_in_bytes: usize, owned_region_end: * /// the one-past-the-end address. The returned pointer is guaranteed to be valid for accesses /// of size one, if the original pointer is valid. Additional checks are required for making /// larger memory accesses. +#[rr::skip] +#[rr::params("l", "off", "lmax")] +#[rr::args("l", "off", "lmax")] +#[rr::requires("⌜l.2 + off < lmax.2⌝")] +#[rr::returns("Ok(l +ₗ off)")] pub fn ptr_byte_add_mut( pointer: *mut usize, offset_in_bytes: usize, owned_region_end: *const usize, ) -> Result<*mut usize, PointerError> { diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs index 107f2e5..bada008 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs @@ -1,22 +1,39 @@ // SPDX-FileCopyrightText: 2023 IBM Corporation // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 +#![rr::import("ace.theories.page_allocator", "page")] +#![rr::include("option")] // The order of page size in this enum must follow the increasing sizes of page to guarantee that the Ord/PartialOrd are correctly derived // for the `PageSize`. #[repr(u8)] #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] +#[rr::refined_by("page_size")] pub enum PageSize { + #[rr::pattern("Size4KiB")] Size4KiB, /// 16KiB page is used by G-stage root page table in RISC-V + #[rr::pattern("Size16KiB")] Size16KiB, + #[rr::pattern("Size2MiB")] Size2MiB, + #[rr::pattern("Size1GiB")] Size1GiB, + #[rr::pattern("Size512GiB")] Size512GiB, + #[rr::pattern("Size128TiB")] Size128TiB, } impl PageSize { + // Usually there are 512 pages of size x that can fit in a single page of size y, where y is next page size larger than x (e.g., 2MiB + // and 4KiB). 
+ pub const TYPICAL_NUMBER_OF_PAGES_INSIDE_LARGER_PAGE: usize = 512; + + #[rr::trust_me] + #[rr::params("x")] + #[rr::args("#x")] + #[rr::returns("page_size_in_bytes_Z x")] pub fn in_bytes(&self) -> usize { match self { PageSize::Size128TiB => 8 * 512 * 512 * 512 * 512 * 256, @@ -28,6 +45,10 @@ impl PageSize { } } + #[rr::trust_me] + #[rr::params("x")] + #[rr::args("#x")] + #[rr::returns("<#>@{option} page_size_smaller x")] pub fn smaller(&self) -> Option { match self { PageSize::Size128TiB => Some(PageSize::Size512GiB), @@ -39,6 +60,10 @@ impl PageSize { } } + #[rr::trust_me] + #[rr::params("x")] + #[rr::args("#x")] + #[rr::returns("<#>@{option} page_size_larger x")] pub fn larger(&self) -> Option { match self { PageSize::Size128TiB => None, @@ -50,6 +75,10 @@ impl PageSize { } } + #[rr::trust_me] + #[rr::params("x")] + #[rr::args("#x")] + #[rr::returns("page_size_multiplier x")] pub fn number_of_smaller_pages(&self) -> usize { match self { PageSize::Size128TiB => 256, @@ -61,10 +90,12 @@ impl PageSize { } } + #[rr::returns("Size128TiB")] pub fn largest() -> PageSize { PageSize::Size128TiB } + #[rr::returns("Size4KiB")] pub fn smallest() -> PageSize { PageSize::Size4KiB } diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_table.rs b/security-monitor/src/core/architecture/riscv/mmu/page_table.rs index 3d115f0..7dc35dd 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/page_table.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/page_table.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2023 IBM Corporation // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 +#![rr::import("ace.theories.page_table", "page_table")] use crate::core::architecture::mmu::page_table_entry::{LogicalPageTableEntry, PageTableEntry}; use crate::core::architecture::mmu::page_table_level::PageTableLevel; use crate::core::architecture::mmu::paging_system::PagingSystem; @@ -26,16 +27,34 @@ use sha2::digest::crypto_common::generic_array::GenericArray; /// /// During the execution of a confidential hart, MMU might traverse the serialized representation. Our changes to this representation must /// be atomic, so that MMU always reads a valid configuration. +/// +/// Model: We model the page table by a `page_table_tree` which captures the inherent tree +/// structure. +#[rr::refined_by("pt_logical" : "page_table_tree")] +/// Invariant: We assert that there exists a serialized byte-level representation of the page table +/// tree in the form of a page. +#[rr::exists("pt_byte" : "page")] +#[rr::invariant("is_byte_level_representation pt_logical pt_byte")] pub struct PageTable { + #[rr::field("pt_get_level pt_logical")] level: PageTableLevel, + #[rr::field("pt_get_system pt_logical")] paging_system: PagingSystem, /// Serialized representation stores the architecture-specific, binary representation of the page table configuration that is read by /// the MMU. + #[rr::field("pt_byte")] serialized_representation: Page, /// Logical representation stores a strongly typed page table configuration used by security monitor. + #[rr::field("<#> (pt_get_entries pt_logical)")] logical_representation: Vec<LogicalPageTableEntry>, } +/// Verification: what do we want to prove? +/// - the page table is structured according to RISC-V spec +/// - the page table has unique ownership over all "reachable" memory +/// - copying a page table from non-confidential memory only reads from non-confidential memory +/// - if input to copy_.. 
is not a valid page table, fail correctly +#[rr::skip] impl PageTable { /// This functions copies recursively page table structure from non-confidential memory to confidential memory. It /// allocated a page in confidential memory for every page table. After this function executes, a valid page table @@ -44,6 +63,12 @@ impl PageTable { /// If the input page table configuration has two page table entries that point to the same page, then the copied page table /// configuration will have two page table entries pointing to different pages of the same content. /// + /// # Safety + /// + /// This function expects a page table structure at the given `address` and will dereference + /// this pointer and further pointers parsed from the page table structure, as long as they are + /// in non-confidential memory. + /// /// # Guarantees /// /// The copied page table configuration: @@ -53,6 +78,30 @@ impl PageTable { /// * all `pointer` page table entries point to another page table belonging to the same page table configuration. /// /// This is a recursive function, which deepest execution is not larger than the number of paging system levels. + #[rr::params("l_nonconf", "ps", "level")] + #[rr::args("l_nonconf", "ps", "level")] + /// Precondition: We require the permission to copy from arbitrary non-confidential memory. + /// Note that this implicitly specifies that we do not read from confidential memory. + /// TODO: might need atomicity? + #[rr::requires(#iris "permission_to_read_from_nonconfidential_mem")] + // TODO: want to prove eventually + //#[rr::ensures("value_has_security_level Hypervisor x")] + // eventually: promote x from Hypervisor to confidential_vm_{new_id} + #[rr::exists("x" : "result page_table_tree core_error_Error")] + #[rr::returns("<#>@{result} x")] + /* Alternative specification: + // We won't need this for security, but if we were to trust the hypervisor, we could + // prove this specification. + // + // SPEC 2: + // For functional correctness (trusting the hypervisor): + #[rr::params("l_nonconf", "ps", "level" : "nat", "pt" : "page_table_tree")] + #[rr::args("l_nonconf", "ps", "level")] + #[rr::requires(#iris "address_has_valid_page_table l_nonconf pt")] + #[rr::exists("pt'")] + #[rr::ensures("related_page_tables pt pt'")] + #[rr::returns("#Ok(pt')")] // (or out of memory) + */ pub fn copy_from_non_confidential_memory( address: NonConfidentialMemoryAddress, paging_system: PagingSystem, level: PageTableLevel, ) -> Result { @@ -91,6 +140,11 @@ impl PageTable { /// Creates an empty page table for the given page table level. Returns error if there is not enough memory to allocate this data /// structure. + #[rr::params("system", "level")] + #[rr::args("system", "level")] + #[rr::exists("res")] + #[rr::returns("<#>@{result} res")] + #[rr::returns("if_Ok res (λ tree, tree = make_empty_page_tree system level)")] pub fn empty(paging_system: PagingSystem, level: PageTableLevel) -> Result { let serialized_representation = PageAllocator::acquire_page(paging_system.memory_page_size(level))?.zeroize(); let number_of_entries = serialized_representation.size().in_bytes() / paging_system.entry_size(); @@ -188,6 +242,9 @@ impl PageTable { } /// Returns the physical address in confidential memory of the page table configuration. 
+ #[rr::params("pt")] + #[rr::args("#pt")] + #[rr::returns("pt_get_serialized_loc pt")] pub fn address(&self) -> usize { self.serialized_representation.start_address() } @@ -197,6 +254,12 @@ impl PageTable { } /// Set a new page table entry at the given index, replacing whatever was there before. + #[rr::params("pt", "γ", "vpn", "pte")] + #[rr::args("(#pt, γ)", "vpn", "pte")] + /// Precondition: The vpn is valid for the number of entries of this page table. + #[rr::requires("vpn < pt_number_of_entries pt")] + /// Postcondition: The entry has been set correctly. + #[rr::oberve("γ": "pt_set_entry pt vpn pte")] fn set_entry(&mut self, virtual_page_number: usize, entry: LogicalPageTableEntry) { self.serialized_representation.write(self.paging_system.entry_size() * virtual_page_number, entry.serialize()).unwrap(); let entry_to_remove = core::mem::replace(&mut self.logical_representation[virtual_page_number], entry); @@ -206,6 +269,8 @@ impl PageTable { } /// Recursively clears the entire page table configuration, releasing all pages to the PageAllocator. + #[rr::params("x")] + #[rr::args("x")] pub fn deallocate(mut self) { let mut pages = Vec::with_capacity(self.logical_representation.len() + 1); pages.push(self.serialized_representation.deallocate()); diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs b/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs index 5238adf..866fe09 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2023 IBM Corporation // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 +#![rr::import("ace.theories.page_table", "page_table")] use super::specification::*; use crate::core::architecture::mmu::page_table::PageTable; use crate::core::architecture::SharedPage; @@ -11,10 +12,18 @@ use alloc::boxed::Box; /// Logical page table entry contains variants specific to the security monitor architecture. These new variants distinguish among certain /// types (e.g., shared page, confidential data page) that are not covered by the general RISC-V specification. +#[rr::refined_by("logical_page_table_entry")] pub(super) enum LogicalPageTableEntry { + #[rr::pattern("PointerToNextPageTable" $ "next", "conf")] + #[rr::refinement("-[ #(#next); #conf]")] PointerToNextPageTable(Box), + #[rr::pattern("PageWithConfidentialVmData" $ "p", "conf", "perm")] + #[rr::refinement("-[ #(#p); #conf; #perm]")] PageWithConfidentialVmData(Box>), + #[rr::pattern("PageSharedWithHypervisor" $ "sp", "conf", "perm")] + #[rr::refinement("-[ #sp; #conf; #perm]")] PageSharedWithHypervisor(SharedPage), + #[rr::pattern("UnmappedPage")] NotMapped, } @@ -45,9 +54,15 @@ impl LogicalPageTableEntry { } /// Page table entry corresponds to entires defined by the RISC-V spec. +#[rr::refined_by("page_table_entry")] pub(super) enum PageTableEntry { + #[rr::pattern("UnmappedPTE")] NotMapped, + #[rr::pattern("NextPTE" $ "p")] + #[rr::refinement("-[ #p]")] PointerToNextPageTable(*mut usize), + #[rr::pattern("DataPTE" $ "p")] + #[rr::refinement("-[ #p]")] PointerToDataPage(*mut usize), } @@ -63,6 +78,7 @@ impl PageTableEntry { /// Decodes a raw pointer from the page table entry. It is up to the user to decide how to deal with this pointer and check if it is /// valid and is in confidential or non-confidential memory. 
pub fn decode_pointer(raw_entry: usize) -> *mut usize { + // TODO: think how we can justify the integer-pointer cast ((raw_entry & !CONFIGURATION_BIT_MASK) << ADDRESS_SHIFT) as *mut usize } } diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_table_level.rs b/security-monitor/src/core/architecture/riscv/mmu/page_table_level.rs index 14a72d4..854dbca 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/page_table_level.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/page_table_level.rs @@ -3,15 +3,25 @@ // SPDX-License-Identifier: Apache-2.0 #[derive(Copy, Clone, Debug, PartialEq)] +#[rr::refined_by("page_table_level")] pub enum PageTableLevel { + #[rr::pattern("PTLevel5")] Level5, + #[rr::pattern("PTLevel4")] Level4, + #[rr::pattern("PTLevel3")] Level3, + #[rr::pattern("PTLevel2")] Level2, + #[rr::pattern("PTLevel1")] Level1, } impl PageTableLevel { + #[rr::trust_me] + #[rr::params("x")] + #[rr::args("#x")] + #[rr::returns("<#>@{option} (page_table_level_lower x)")] pub fn lower(&self) -> Option { match self { Self::Level5 => Some(Self::Level4), diff --git a/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs b/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs index 4451481..ee95f88 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs @@ -9,7 +9,9 @@ use crate::core::memory_layout::ConfidentialVmPhysicalAddress; // TODO: add more 2nd-level paging systems corresponding to 3 and 4 level page // tables. #[derive(Debug, Copy, Clone)] +#[rr::refined_by("paging_system")] pub enum PagingSystem { + #[rr::pattern("Sv57x4")] Sv57x4, } @@ -26,6 +28,10 @@ impl PagingSystem { } } + #[rr::skip] + #[rr::params("system")] + #[rr::args("#system")] + #[rr::returns("paging_system_highest_level system")] pub fn levels(&self) -> PageTableLevel { match self { PagingSystem::Sv57x4 => PageTableLevel::Level5, diff --git a/security-monitor/src/core/memory_layout/confidential_memory_address.rs b/security-monitor/src/core/memory_layout/confidential_memory_address.rs index 4e6f40f..217f1ed 100644 --- a/security-monitor/src/core/memory_layout/confidential_memory_address.rs +++ b/security-monitor/src/core/memory_layout/confidential_memory_address.rs @@ -7,32 +7,74 @@ use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset}; /// The wrapper over a raw pointer that is guaranteed to be an address located in the confidential memory region. #[repr(transparent)] #[derive(Debug, PartialEq)] -pub struct ConfidentialMemoryAddress(*mut usize); +/// Model: The memory address is represented by the location in memory. +#[rr::refined_by("l" : "loc")] +/// We require the ghost state for the global memory layout to be available. +#[rr::context("onceG Σ memory_layout")] +/// Invariant: The global memory layout has been initialized. +#[rr::exists("MEMORY_CONFIG")] +#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] +/// Invariant: The address is in the confidential part of the memory layout. +#[rr::invariant("(MEMORY_CONFIG.(conf_start).2 ≤ l.2 < MEMORY_CONFIG.(conf_end).2)%Z")] +pub struct ConfidentialMemoryAddress(#[rr::field("l")] *mut usize); +/// Verification: We require the ghost state for the global memory layout to be available. +#[rr::context("onceG Σ memory_layout")] impl ConfidentialMemoryAddress { + #[rr::params("l", "MEMORY_CONFIG")] + #[rr::args("l")] + /// Precondition: The global memory layout is initialized. 
+ #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + /// Precondition: The address is in the confidential region of the global memory layout. + #[rr::requires("(MEMORY_CONFIG.(conf_start).2 ≤ l.2 < MEMORY_CONFIG.(conf_end).2)%Z")] + #[rr::returns("l")] pub(super) fn new(address: *mut usize) -> Self { Self(address) } - // TODO: check if needed. If yes, make sure the raw pointer is not used incorrectly Currently we only use it during - // creation of the heap allocator structure. It would be good to get rid of this because it requires extra safety - // guarantees for parallel execution of the security monitor + // TODO: check if needed. If yes, make sure the raw pointer is not used incorrectly + // Currently we only use it during creation of the heap allocator structure. It + // would be good to get rid of this because it requires extra safety guarantees for + // parallel execution of the security monitor + #[rr::params("l")] + #[rr::args("l")] + #[rr::returns("l")] pub unsafe fn into_mut_ptr(self) -> *mut usize { self.0 } + #[rr::params("l")] + #[rr::args("#l")] + #[rr::returns("l")] pub unsafe fn to_ptr(&self) -> *const u8 { self.0 as *const u8 } + #[rr::only_spec] + #[rr::params("l")] + #[rr::args("#l")] + #[rr::returns("l.2")] pub fn as_usize(&self) -> usize { + // TODO: check if we need to expose the pointer. + // If not, use addr() instead. + // self.0.addr() self.0 as usize } + #[rr::only_spec] + #[rr::params("l", "align")] + #[rr::args("#l", "align")] + /// Postcondition: Verifies that the pointer is aligned to the given alignment. + #[rr::returns("bool_decide (l `aligned_to` (Z.to_nat align))")] pub fn is_aligned_to(&self, align: usize) -> bool { self.0.is_aligned_to(align) } + #[rr::only_spec] + #[rr::params("this", "other")] + #[rr::args("#this", "other")] + /// Postcondition: Compute the offset. + #[rr::returns("other.2 - this.2")] pub fn offset_from(&self, pointer: *const usize) -> isize { ptr_byte_offset(pointer, self.0) } @@ -43,27 +85,50 @@ impl ConfidentialMemoryAddress { /// # Safety /// /// The caller must ensure that the address at given offset is still within the confidential memory region. + // TODO: can we require the offset to be a multiple of usize? + #[rr::only_spec] + #[rr::params("l", "off", "lmax", "MEMORY_CONFIG")] + #[rr::args("#l", "off", "lmax")] + /// Precondition: The offset address is in the given range. + #[rr::requires("l.2 + off < lmax.2")] + /// Precondition: The global memory layout is initialized. + #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + /// Precondition: The maximum (and thus the offset address) is in the confidential memory range. + #[rr::requires("lmax.2 < MEMORY_CONFIG.(conf_end).2")] + /// Postcondition: The offset pointer is in the confidential memory range. + #[rr::returns("Ok(#(l +ₗ off))")] pub unsafe fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result { let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInConfidentialMemory())?; Ok(ConfidentialMemoryAddress(pointer)) } /// Reads usize-sized sequence of bytes from the confidential memory region. - /// /// # Safety /// - /// Caller must ensure that the pointer is not used by two threads simultaneously. See `ptr::read_volatile` for - /// safety concerns - pub unsafe fn read_volatile(&self) -> usize { + /// Caller must ensure that the pointer is not used by two threads simultaneously and that it is correctly aligned for usize. 
+ /// See `ptr::read_volatile` for safety concerns + // TODO: currently only_spec because shim registration for read_volatile doesn't work + // TODO require that lifetime [lft_el] is actually alive + #[rr::only_spec] + #[rr::params("l", "z", "lft_el")] + #[rr::args("#l")] + #[rr::requires(#iris "l ◁ₗ[π, Shared lft_el] #z @ ◁ int usize_t")] + #[rr::returns("z")] + pub unsafe fn read_volatile<'a>(&'a self) -> usize { self.0.read_volatile() } /// Writes usize-sized sequence of bytes to the confidential memory region. - /// /// # Safety /// - /// Caller must ensure that the pointer is not used by two threads simultaneously. See `ptr::write_volatile` for - /// safety concerns + /// Caller must ensure that the pointer is not used by two threads simultaneously and that it is correctly aligned for usize. + /// See `ptr::write_volatile` for safety concerns + // TODO: currently only_spec because shim registration for write_volatile doesn't work + #[rr::only_spec] + #[rr::params("l", "z", "x")] + #[rr::args("#l", "x")] + #[rr::requires(#type "l" : "z" @ "int usize_t")] + #[rr::ensures(#type "l" : "x" @ "int usize_t")] pub unsafe fn write_volatile(&self, value: usize) { self.0.write_volatile(value); } diff --git a/security-monitor/src/core/memory_layout/mod.rs b/security-monitor/src/core/memory_layout/mod.rs index 9db4b03..19525b8 100644 --- a/security-monitor/src/core/memory_layout/mod.rs +++ b/security-monitor/src/core/memory_layout/mod.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2023 IBM Corporation // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 +#![rr::import("ace.theories.memory_layout", "memory_layout")] pub use confidential_memory_address::ConfidentialMemoryAddress; pub use confidential_vm_physical_address::ConfidentialVmPhysicalAddress; pub use non_confidential_memory_address::NonConfidentialMemoryAddress; @@ -17,14 +18,32 @@ mod non_confidential_memory_address; /// MEMORY_LAYOUT is a static variable (private to this module) that is set during the system boot and never changes /// later -- this is guaranteed by Once<>. It stores an instance of the `MemoryLayout`. The only way to get a shared /// access to this instance is by calling `MemoryLayout::read()` function. +#[rr::name("MEMORY_LAYOUT")] static MEMORY_LAYOUT: Once = Once::new(); /// Provides an interface to offset addresses that are guaranteed to remain inside the same memory region, i.e., /// confidential or non-confidential memory. +/// +/// Model: A Coq `memory_layout` record containing the memory ranges for confidential and +/// non-confidential memory. +#[rr::refined_by("ml" : "memory_layout")] +/// Invariant: The starts of the region have addresses less-or-equal-to the ends of the regions. +#[rr::invariant("ml.(non_conf_start).2 ≤ ml.(non_conf_end).2")] +#[rr::invariant("ml.(conf_start).2 ≤ ml.(conf_end).2")] +/// Invariant: the non-confidential memory region comes before the confidential memory region. 
+// TODO: this could be generalized to the regions being disjoint +#[rr::invariant("ml.(non_conf_end).2 ≤ ml.(conf_start).2")] +/// Invariant: the bounds of the confidential memory region are aligned to 4KiB pages +#[rr::invariant("ml.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB)")] +#[rr::invariant("ml.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB)")] pub struct MemoryLayout { + #[rr::field("ml.(non_conf_start)")] non_confidential_memory_start: *mut usize, + #[rr::field("ml.(non_conf_end)")] non_confidential_memory_end: *const usize, + #[rr::field("ml.(conf_start)")] confidential_memory_start: *mut usize, + #[rr::field("ml.(conf_end)")] confidential_memory_end: *const usize, } @@ -36,6 +55,7 @@ pub struct MemoryLayout { unsafe impl Send for MemoryLayout {} unsafe impl Sync for MemoryLayout {} +#[rr::context("onceG Σ memory_layout")] impl MemoryLayout { const NOT_INITIALIZED_MEMORY_LAYOUT: &'static str = "Bug. Could not access MemoryLayout because is has not been initialized"; @@ -46,6 +66,36 @@ impl MemoryLayout { /// # Safety /// /// This function must be called only once by the initialization procedure during the boot of the system. + #[rr::only_spec] + #[rr::params("non_conf_start", "non_conf_end", "conf_start", "conf_end")] + #[rr::args("non_conf_start", "non_conf_end", "conf_start", "conf_end")] + /// Precondition: The non-confidential memory should have positive size. + #[rr::requires("non_conf_start.2 < non_conf_end.2")] + /// Precondition: The non-confidential memory should precede and not overlap with confidential memory. + #[rr::requires("non_conf_end.2 ≤ conf_start.2")] + /// Precondition: The confidential memory should have positive size. + #[rr::requires("conf_start.2 < conf_end.2")] + /// Precondition: The global MEMORY_LAYOUT has not been initialized yet. + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" None")] + /// Postcondition: There exists a result -- failure is always an option + #[rr::exists("res")] + /// Postcondition: failure due to low memory can occur if there is no sufficiently aligned + /// confidential address + #[rr::ensures("if_Err res (λ err, (conf_end.2 - conf_start.2 ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)")] + /// Postcondition: if we return Ok, we get a new confidential memory range that is correctly + /// aligned for the smallest page size and is a subrange of [conf_start, conf_end) + #[rr::ensures( "if_Ok res (λ ok, ∃ conf_start' conf_end', conf_start' `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧ conf_end' `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧ conf_start.2 ≤ conf_start'.2 ∧ conf_end'.2 ≤ conf_end.2 ∧ conf_start'.2 ≤ conf_end'.2 ∧ ok = -[ #conf_start'; #conf_end'])%Z" )] + /// Postcondition: if we return Ok, the MEMORY_LAYOUT has been initialized. + #[rr::ensures(#iris "once_initialized π \"MEMORY_LAYOUT\" (match res with | Ok *[ #conf_start; #conf_end] => Some (mk_memory_layout non_conf_start non_conf_end conf_start conf_end) | _ => None end)")] + #[rr::returns("<#>@{result} res")] pub unsafe fn init( non_confidential_memory_start: *mut usize, non_confidential_memory_end: *const usize, confidential_memory_start: *mut usize, mut confidential_memory_end: *const usize, @@ -82,6 +132,13 @@ impl MemoryLayout { /// Offsets an address in the confidential memory by a given number of bytes. Returns an error if the resulting /// address is not in the confidential memory region. 
+ #[rr::only_spec] + #[rr::params("bounds", "l", "off")] + #[rr::args("#bounds", "#l", "off")] + /// Precondition: The offset address is in confidential memory. + #[rr::requires("l.2 + off < bounds.(conf_end).2")] + /// Postcondition: The offset pointer is in confidential memory. + #[rr::returns("Ok (#(l +ₗ off))")] pub fn confidential_address_at_offset( &self, address: &ConfidentialMemoryAddress, offset_in_bytes: usize, ) -> Result { @@ -90,6 +147,16 @@ impl MemoryLayout { /// Offsets an address in the confidential memory by a given number of bytes. Returns an error if the resulting /// address is outside the confidential memory region or exceeds the given upper bound. + #[rr::only_spec] + #[rr::params("bounds", "l", "off", "bound")] + #[rr::args("#bounds", "#l", "off", "bound")] + /// Precondition: The offset address is in confidential memory. + #[rr::requires("l.2 + off < bounds.(conf_end).2")] + /// Precondition: The bounds we are checking are within confidential memory. + #[rr::requires("bound.2 ≤ bounds.(conf_end).2")] + /// Postcondition: Then we can correctly offset the address and ensure it is in confidential + /// memory. + #[rr::returns("Ok (#(l +ₗ off))")] pub fn confidential_address_at_offset_bounded( &self, address: &ConfidentialMemoryAddress, offset_in_bytes: usize, upper_bound: *const usize, ) -> Result { @@ -99,6 +166,14 @@ impl MemoryLayout { /// Offsets an address in the non-confidential memory by given number of bytes. Returns an error if the resulting /// address is outside the non-confidential memory region. + #[rr::only_spec] + #[rr::params("bounds", "l", "off")] + #[rr::args("#bounds", "#l", "off")] + /// Precondition: The offset address is in non-confidential memory. + #[rr::requires("l.2 + off < bounds.(non_conf_end).2")] + /// Postcondition: Then we can correctly offset the address and ensure it is in + /// non-confidential memory. + #[rr::returns("Ok (#(l +ₗ off))")] pub fn non_confidential_address_at_offset( &self, address: &NonConfidentialMemoryAddress, offset_in_bytes: usize, ) -> Result { @@ -107,6 +182,10 @@ impl MemoryLayout { } /// Returns true if the raw pointer is inside the non-confidential memory. + #[rr::only_spec] + #[rr::params("bounds", "l")] + #[rr::args("#bounds", "l")] + #[rr::returns("bool_decide (bounds.(non_conf_start).2 ≤ l.2 ∧ l.2 < bounds.(non_conf_end).2)")] pub fn is_in_non_confidential_range(&self, address: *const usize) -> bool { self.non_confidential_memory_start as *const usize <= address && address < self.non_confidential_memory_end } @@ -117,6 +196,7 @@ impl MemoryLayout { /// /// Caller must guarantee that there is no other thread that can write to confidential memory during execution of /// this function. + // TODO(verification): we need to come up with a mechanism to acquire ownership of all memory pub unsafe fn clear_confidential_memory(&self) { // We can safely cast the below offset to usize because the constructor guarantees that the confidential memory // range is valid, and so the memory size must be a valid usize @@ -128,10 +208,22 @@ impl MemoryLayout { }); } + /// Get a pointer to the globally initialized `MemoryLayout`. + /// Panics if the memory layout has not been initialized yet. + #[rr::only_spec] + #[rr::params("x")] + /// Precondition: The memory layout has been initialized. 
+ #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some x)")] + #[rr::returns("#x")] pub fn read() -> &'static MemoryLayout { MEMORY_LAYOUT.get().expect(Self::NOT_INITIALIZED_MEMORY_LAYOUT) } + /// Get the boundaries of confidential memory as a (start, end) tuple. + #[rr::only_spec] + #[rr::params("bounds")] + #[rr::args("#bounds")] + #[rr::returns("-[#bounds.(conf_start).2; #bounds.(conf_end).2]")] pub fn confidential_memory_boundary(&self) -> (usize, usize) { (self.confidential_memory_start as usize, self.confidential_memory_end as usize) } diff --git a/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs b/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs index eca2cc2..ba0fcd5 100644 --- a/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs +++ b/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs @@ -8,11 +8,30 @@ use pointers_utility::ptr_byte_add_mut; /// The wrapper over a raw pointer that is guaranteed to be an address located in the non-confidential memory region. #[repr(transparent)] #[derive(Debug)] -pub struct NonConfidentialMemoryAddress(*mut usize); +/// Model: The memory address is represented by the location in memory. +#[rr::refined_by("l" : "loc")] +/// We require the ghost state for the global memory layout to be available. +#[rr::context("onceG Σ memory_layout")] +/// Invariant: The global memory layout has been initialized. +#[rr::exists("MEMORY_CONFIG")] +#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] +/// Invariant: The address is in non-confidential memory. +#[rr::invariant("(MEMORY_CONFIG.(non_conf_start).2 ≤ l.2 < MEMORY_CONFIG.(non_conf_end).2)%Z")] +pub struct NonConfidentialMemoryAddress(#[rr::field("l")] *mut usize); +#[rr::context("onceG Σ memory_layout")] impl NonConfidentialMemoryAddress { /// Constructs an address in a non-confidential memory. Returns error if the address is outside non-confidential /// memory. + #[rr::trust_me] + #[rr::params("l", "bounds")] + #[rr::args("l")] + /// Precondition: The global memory layout has been initialized. + #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some bounds)")] + /// Precondition: The location is in non-confidential memory. + #[rr::requires("bounds.(non_conf_start).2 ≤ l.2 < bounds.(non_conf_end).2")] + /// Postcondition: The non-confidential memory address is correctly initialized. + #[rr::returns("Ok (#l)")] pub fn new(address: *mut usize) -> Result { match MemoryLayout::read().is_in_non_confidential_range(address) { false => Err(Error::AddressNotInNonConfidentialMemory()), @@ -27,6 +46,18 @@ impl NonConfidentialMemoryAddress { /// /// The caller must ensure that the address advanced by the offset is still within the non-confidential memory /// region. + // TODO: can we require the offset to be a multiple of usize? + #[rr::only_spec] + #[rr::params("l", "off", "lmax", "MEMORY_CONFIG")] + #[rr::args("#l", "off", "lmax")] + /// Precondition: The offset address is in the given range. + #[rr::requires("l.2 + off < lmax.2")] + /// Precondition: The global memory layout is initialized. + #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + /// Precondition: The maximum (and thus the offset address) is in the non-confidential memory range. + #[rr::requires("lmax.2 < MEMORY_CONFIG.(non_conf_end).2")] + /// Postcondition: The offset pointer is in the non-confidential memory range. 
+ #[rr::returns("Ok(#(l +ₗ off))")] pub unsafe fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result { let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInNonConfidentialMemory())?; Ok(NonConfidentialMemoryAddress(pointer)) @@ -52,11 +83,21 @@ impl NonConfidentialMemoryAddress { self.0.write_volatile(value); } + #[rr::params("l")] + #[rr::args("#l")] + #[rr::returns("l")] pub fn as_ptr(&self) -> *const usize { self.0 } + #[rr::only_spec] + #[rr::params("l")] + #[rr::args("#l")] + #[rr::returns("l.2")] pub fn usize(&self) -> usize { + // TODO: check if we need to expose the pointer. + // If not, use addr() instead. + // self.0.addr() self.0 as usize } } diff --git a/security-monitor/src/core/page_allocator/allocator.rs b/security-monitor/src/core/page_allocator/allocator.rs index 75436c8..67aeef2 100644 --- a/security-monitor/src/core/page_allocator/allocator.rs +++ b/security-monitor/src/core/page_allocator/allocator.rs @@ -1,6 +1,12 @@ // SPDX-FileCopyrightText: 2023 IBM Corporation // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 +#![rr::include("option")] +#![rr::include("alloc")] +#![rr::include("btreemap")] +#![rr::include("vec")] +#![rr::include("spin")] +#![rr::import("ace.theories.page_allocator", "page_allocator")] use super::page::{Page, UnAllocated}; use crate::core::architecture::PageSize; use crate::core::memory_layout::{ConfidentialMemoryAddress, MemoryLayout}; @@ -10,17 +16,34 @@ use alloc::vec::Vec; use spin::{Once, RwLock, RwLockWriteGuard}; /// A static global structure containing unallocated pages. Once<> guarantees that the PageAllocator can only be initialized once. +//#[rr::name("PAGE_ALLOCATOR")] static PAGE_ALLOCATOR: Once> = Once::new(); /// This is a root node that represents the largest possible page size. Because of this implementation, there can be a maximum one page /// token of the maximum size, and it will be then stored in the root node. It is reasonable as long as we do not support systems that have /// more memory than 128TiB. For such systems, we must add larger page sizes. +/// Specification: +/// We give a "ghost name" γ to the allocator, which is used to link up page tokens allocated +/// with this allocator. +#[rr::refined_by("()" : "unit")] +/// Invariant: We abstract over the root node +#[rr::exists("node" : "page_storage_node")] +/// Invariant: the allocator tree covers the first 128TiB of memory +#[rr::invariant("node.(base_address) = 0")] +/// Invariant: the page size is the largest +#[rr::invariant("node.(max_node_size) = Size128TiB")] pub struct PageAllocator { + #[rr::field("node.(base_address)")] base_address: usize, + #[rr::field("node.(max_node_size)")] page_size: PageSize, + #[rr::field("node")] root: PageStorageTreeNode, } +#[rr::skip] +#[rr::context("onceG Σ memory_layout")] +#[rr::context("onceG Σ unit")] impl PageAllocator { const NOT_INITIALIZED: &'static str = "Bug. Page allocator not initialized."; @@ -34,23 +57,75 @@ impl PageAllocator { /// # Safety /// /// Caller must pass the ownership of the memory region [memory_start, memory_end). + #[rr::params("conf_start", "conf_end", "vs", "MEMORY_LAYOUT")] + #[rr::args("conf_start", "conf_end")] + + /// Precondition: The start address needs to be aligned to the minimum page size. + #[rr::requires("Hstart_aligned" : "conf_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + /// Precondition: The minimum page size divides the memory size. 
+ #[rr::requires("Hsz_div" : "(page_size_in_bytes_nat Size4KiB) | (conf_end.2 - conf_start.2)")] + /// Precondition: The memory range should not be negative. + #[rr::requires("Hstart_lt" : "conf_start.2 ≤ conf_end.2")] + + /// Precondition: The memory range does not exceed the maximum page size. + #[rr::requires("mend.2 ≤ page_size_in_bytes_Z Size128TiB")] + + /// Precondition: We have ownership of the memory range, having (conf_end - conf_start) bytes. + #[rr::requires(#type "conf_start" : "vs" @ "array_t (int u8) (Z.to_nat (conf_end.2 - conf_start.2))")] + + /// Precondition: The memory needs to be in confidential memory + #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ memory_start.2")] + #[rr::requires("memory_end.2 ≤ MEMORY_CONFIG.(conf_end).2")] + + /// Precondition: The page allocator should be uninitialized. + #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" None")] + + /// Postcondition: The page allocator is initialized. + #[rr::ensures(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] + #[rr::returns("Ok(#())")] pub unsafe fn initialize(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result<(), Error> { ensure_not!(PAGE_ALLOCATOR.is_completed(), Error::Reinitialization())?; - let mut page_allocator = Self::empty(memory_start.as_usize()); + let mut page_allocator = Self::empty(); page_allocator.add_memory_region(memory_start, memory_end)?; + // NOTE: We initialize the invariant here. PAGE_ALLOCATOR.call_once(|| RwLock::new(page_allocator)); Ok(()) } - fn empty(base_address: usize) -> Self { - assert!(base_address > 1); - // It is ok to align downwards because the tree structure only logically represents memory regions, i.e., it does not need ownership - // of these memory regions. A tree node is only a placeholder for a page token. We must align downwards because the start of - // the confidential memory region may not be (and almost never will be) aligned to the largest page size. - let base_address_aligned_down = (base_address - 1) & !(PageSize::largest().in_bytes() - 1); - Self { root: PageStorageTreeNode::empty(), base_address: base_address_aligned_down, page_size: PageSize::largest() } + /// Specification: + /// Postcondition: an initialized memory allocator is returned + #[rr::returns("()")] + fn empty() -> Self { + Self { root: PageStorageTreeNode::empty(), base_address: 0, page_size: PageSize::largest() } } + #[rr::params("mstart", "mend", "γ", "vs", "MEMORY_CONFIG", "mreg")] + #[rr::args("(#(), γ)", "mstart", "mend")] + + /// Precondition: The start address needs to be aligned to the minimum page size. + #[rr::requires("Hstart_aligned" : "mstart `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + + /// Precondition: The minimum page size divides the memory size. + #[rr::requires("Hsz_div" : "(page_size_in_bytes_nat Size4KiB) | (mend.2 - mstart.2)")] + + /// Precondition: The memory range is positive. + #[rr::requires("Hstart_lt" : "mstart.2 < mend.2")] + + /// Precondition: The memory range is within the region covered by the memory allocator. + #[rr::requires("mend.2 ≤ page_size_in_bytes_Z Size128TiB")] + + /// Precondition: We have ownership of the memory range, having (mend - mstart) bytes. 
+ #[rr::requires(#type "mstart" : "vs" @ "array_t (int u8) (Z.to_nat (mend.2 - mstart.2))")] + + /// Precondition: The memory layout needs to be initialized + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + /// Precondition: the whole memory region should be part of confidential memory + #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ mstart.2")] + #[rr::requires("mend.2 ≤ MEMORY_CONFIG.(conf_end).2")] + + /// Postcondition: There exists some correctly initialized page assignment. + #[rr::observe("γ": "()")] unsafe fn add_memory_region( &mut self, memory_region_start: ConfidentialMemoryAddress, memory_region_end: *const usize, ) -> Result<(), Error> { @@ -115,8 +190,8 @@ impl PageAllocator { // (`address`+`page_size.in_bytes()`) is next to the end of the memory region (`memory_region_end`)? if memory_address.is_some() || can_create_page(&address, &page_size) { let new_page_token = Page::::init(address, page_size.clone()); - // Below unwrap is safe because the PageAllocator constructor guarantees the initialization of the map for all possible page - // sizes. + // NOTE We show that the page token is within the range of + // the allocator self.root.store_page_token(self.base_address, self.page_size, new_page_token); } } @@ -126,6 +201,15 @@ impl PageAllocator { /// Returns a page token that has ownership over an unallocated memory region of the requested size. Returns error if it could not /// obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying the requested /// criteria. + /// Specification: + #[rr::params("sz")] + #[rr::args("sz")] + /// Precondition: We require the page allocator to be initialized. + #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] + /// Postcondition: If a page is returned, it has the right size. + #[rr::exists("res")] + #[rr::ensures("if_Ok (λ pg, pg.(page_sz) = sz)")] + #[rr::returns("<#>@{result} res")] pub fn acquire_page(page_size_to_allocate: PageSize) -> Result, Error> { Self::try_write(|page_allocator| { let base_address = page_allocator.base_address; @@ -136,11 +220,16 @@ impl PageAllocator { /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent to deallocation of the /// physical memory region owned by the returned page tokens. Given vector of pages might contains pages of arbitrary sizes. + #[rr::params("pages")] + #[rr::args("<#> pages")] + /// Precondition: We require the page allocator to be initialized. + #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] pub fn release_pages(released_pages: Vec>) { let _ = Self::try_write(|page_allocator| { let base_address = page_allocator.base_address; let page_size = page_allocator.page_size; released_pages.into_iter().for_each(|page_token| { + // NOTE: we show that the token is within range of the allocator. page_allocator.root.store_page_token(base_address, page_size, page_token); }); Ok(()) @@ -157,20 +246,42 @@ impl PageAllocator { /// A node of a tree data structure that stores page tokens and maintains additional metadata that simplifies acquisition and /// release of page tokens. +/// Specification: +/// A node is refined by the size of this node, +/// its base address, +/// and the logical allocation state. 
+// TODO: consider using separate public and private interfaces +#[rr::refined_by("node" : "page_storage_node")] +/// We abstract over the components stored here +#[rr::exists("max_sz" : "option page_size")] +#[rr::exists("maybe_page_token" : "option page")] +#[rr::exists("children" : "list page_storage_node")] +/// Invariant: The representation invariant linking all these things together holds. +#[rr::invariant("page_storage_node_invariant node max_sz maybe_page_token children")] struct PageStorageTreeNode { // Page token owned by this node. `None` means that this page token has already been allocated or that it has been divided into smaller // pages token that were stored in this node's children. + #[rr::field("<#>@{option} maybe_page_token")] page_token: Option>, // Specifies what size of the page token can be allocated by exploring the tree starting at this node. // Invariant: if page token exist, then the its size is the max allocable size. Otherwise, the max allocable page size is the max // allocable page size of children + #[rr::field("<#>@{option} max_sz")] max_allocable_page_size: Option, // Invariant: Children store page tokens smaller than the page token stored in the parent node + #[rr::field("<#> children")] children: Vec, } +#[rr::skip] impl PageStorageTreeNode { /// Creates a new empty node with no allocation. + /// Specification: + /// We can choose an arbitrary node size and base address. + #[rr::params("node_size", "base_address")] + /// Precondition: the base address needs to be suitably aligned. + #[rr::requires("(page_size_align node_size | base_address)%Z")] + #[rr::returns("mk_page_node node_size base_address PageTokenUnavailable false")] pub fn empty() -> Self { Self { page_token: None, max_allocable_page_size: None, children: vec![] } } @@ -180,6 +291,27 @@ impl PageStorageTreeNode { /// PageSize variants. This method has an upper bounded computation complexity. /// /// Invariant: page token's size must not be greater than the page size allocable at this node + #[rr::params("node", "tok", "γ")] + #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)", "tok")] + + /// Precondition: The token we store has a smaller size than the node. + #[rr::requires("tok.(page_sz) ≤ₚ node.(max_node_size)")] + + /// Precondition: The page token is within the range of the node. + #[rr::requires("page_within_range node.(base_address) node.(max_node_size) tok")] + #[rr::exists("available_sz" : "page_size")] + /// Postcondition: we obtain an available page size... + #[rr::returns("available_sz")] + /// Postcondition: ...which is at least the size that we stored. + #[rr::ensures("tok.(page_sz) ≤ₚ available_sz")] + /// Postcondition: This node has been changed... 
+ #[rr::exists("node'")] + #[rr::observe("γ": "node'")] + /// ...and now it can allocate a page of size `available_sz` + #[rr::ensures("page_node_can_allocate node' = Some available_sz")] + /// ...while the rest is unchanged + #[rr::ensures("node'.(max_node_size) = node.(max_node_size)")] + #[rr::ensures("node'.(base_address) = node.(base_address)")] pub fn store_page_token( &mut self, this_node_base_address: usize, this_node_page_size: PageSize, page_token: Page, ) -> PageSize { @@ -192,9 +324,8 @@ impl PageStorageTreeNode { self.store_page_token_in_this_node(page_token); } else { assert!(this_node_page_size.smaller().is_some()); - if self.children.is_empty() { - self.initialize_children(this_node_page_size); - } + self.initialize_children_if_needed(this_node_page_size); + // Calculate which child should we invoke recursively. let index = self.calculate_child_index(this_node_base_address, this_node_page_size, &page_token); // Let's go recursively to the node where this page belongs to. @@ -211,15 +342,25 @@ impl PageStorageTreeNode { self.max_allocable_page_size.unwrap() } - /// Recurisvely traverses the tree to reach a node that contains the page token of the requested size and returns this page token. This + /// Recursively traverses the tree to reach a node that contains the page token of the requested size and returns this page token. This /// function returns also a set of page size that are not allocable anymore at the node. This method has the max depth of recusrive /// invocation equal to the number of PageSize variants. This method has an upper bounded computation complexity. /// /// Invariants: requested page size to acquire must not be greater than a page size allocable at this node. + #[rr::trust_me] + #[rr::params("node", "γ", "sz_to_acquire")] + #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)", "sz_to_acquire")] + /// Precondition: The desired size must be bounded by this node's size (otherwise something went wrong) + #[rr::requires("sz_to_acquire ≤ₚ node.(max_node_size)")] + /// Postcondition: the function can either succeed to allocate a page or fail + #[rr::exists("res" : "result page _")] + #[rr::returns("res")] + /// If it succeeds, the returned page has the desired size. + #[rr::ensures("if_Ok res (λ pg, pg.(page_sz) = sz_to_acquire)")] pub fn acquire_page_token( &mut self, this_node_base_address: usize, this_node_page_size: PageSize, page_size_to_acquire: PageSize, ) -> Result, Error> { - assert!(self.max_allocable_page_size >= Some(page_size_to_acquire)); + ensure!(self.max_allocable_page_size >= Some(page_size_to_acquire), Error::OutOfPages())?; if &this_node_page_size == &page_size_to_acquire { // End of recursion, we found the node from which we acquire a page token. assert!(self.page_token.is_some()); @@ -232,9 +373,7 @@ impl PageStorageTreeNode { // that was requested. assert!(this_node_page_size.smaller().is_some()); // Lazily initialize children - if self.children.is_empty() { - self.initialize_children(this_node_page_size); - } + self.initialize_children_if_needed(this_node_page_size); // Because we are looking for a page token of a smaller size, we must divide the page token owned by this node, if that has // not yet occured. @@ -265,14 +404,30 @@ impl PageStorageTreeNode { /// Outcome: /// - There is one child for every possible smaller page size that can be created for this node, /// - Every child is empty, i.e., has no children, no page token, no possible allocation. 
- fn initialize_children(&mut self, this_node_page_size: PageSize) { - assert!(self.children.is_empty()); - self.children = (0..this_node_page_size.number_of_smaller_pages()).map(|_| Self::empty()).collect(); + #[rr::params("node", "γ")] + #[rr::args("(#node, γ)", "node.(max_node_size)")] + /// Postcondition: leaves the page node unchanged except for initializing the + /// children if necessary + #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) node.(allocation_state) true")] + fn initialize_children_if_needed(&mut self, this_node_page_size: PageSize) { + if self.children.is_empty() { + self.children = (0..this_node_page_size.number_of_smaller_pages()).map(|_| Self::empty()).collect(); + } } /// Stores the given page token in the current node. /// /// Invariant: if there is page token then all page size equal or lower than the page token are allocable from this node. + #[rr::params("node", "γ", "tok")] + #[rr::args("(#node, γ)", "tok")] + /// Precondition: the node is in the unavailable state + #[rr::requires("node.(allocation_state) = PageTokenUnavailable")] + /// Precondition: the token's size is equal to this node's size + #[rr::requires("node.(max_node_size) = tok.(page_sz)")] + /// Precondition: the token's address matches the node's address + #[rr::requires("node.(base_address) = tok.(page_loc).2")] + /// Postcondition: the node changes to the available state + #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) PageTokenAvailable node.(children_initialized)")] fn store_page_token_in_this_node(&mut self, page_token: Page) { assert!(self.page_token.is_none()); self.max_allocable_page_size = Some(*page_token.size()); @@ -283,6 +438,12 @@ impl PageStorageTreeNode { /// /// Invariant: This node owns a page token /// Invariant: After returning the page token, this node does not own the page token and has no allocation + #[rr::params("node", "γ")] + #[rr::args("(#node, γ)")] + #[rr::requires("node.(allocation_state) = PageTokenAvailable")] + #[rr::exists("tok")] + #[rr::returns("tok")] + #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) PageTokenUnavailable node.(children_initialized)")] fn acquire_page_token_from_this_node(&mut self) -> Page { assert!(self.page_token.is_some()); self.max_allocable_page_size = None; @@ -295,20 +456,39 @@ impl PageStorageTreeNode { /// Invariant: After returning, this node can allocate pages of lower page sizes than the original page token. /// Invariant: After returning, every child node owns a page token of smaller size than the original page token. /// Invariant: After returning, every child can allocate a page token of smaller size than the original page token. + #[rr::params("node", "γ", "smaller_size")] + #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)")] + /// Precondition: The children should be initialized. + #[rr::requires("node.(children_initialized)")] + /// Precondition: A token is available in the current node. + #[rr::requires("node.(allocation_state) = PageTokenAvailable")] + /// Precondition: We assume there is a smaller node size. 
+ #[rr::requires("page_size_smaller node.(max_node_size) = Some smaller_size")] + /// Postcondition: The node is updated to the "partially available" state, with a smaller node size being allocable + #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) (PageTokenPartiallyAvailable smaller_size) true")] fn divide_page_token(&mut self, this_node_base_address: usize, this_node_page_size: PageSize) { assert!(self.page_token.is_some()); + let mut smaller_pages = self.page_token.take().unwrap().divide(); assert!(smaller_pages.len() == self.children.len()); smaller_pages.drain(..).for_each(|smaller_page_token| { let index = self.calculate_child_index(this_node_base_address, this_node_page_size, &smaller_page_token); self.children[index].store_page_token_in_this_node(smaller_page_token); }); + let smaller_size = self.max_allocable_page_size.unwrap().smaller().unwrap(); + self.max_allocable_page_size = Some(smaller_size); } /// Merges page tokens owned by children. /// /// Invariant: after merging, there is no child that owns a page token /// Invariant: after merging, this node owns a page token that has size larger than the ones owned before by children. + #[rr::params("node", "γ")] + #[rr::args("(#node, γ)", "node.(max_node_size)")] + #[rr::requires("node.(children_initialized)")] + #[rr::exists("state'")] + #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) state' true")] + #[rr::ensures("state' = node.(allocation_state) ∨ state' = PageTokenAvailable")] fn try_to_merge_page_tokens(&mut self, this_node_page_size: PageSize) { if self.children.iter().all(|child| child.page_token.is_some()) { // All children have page tokens, thus we can merge them. @@ -321,6 +501,21 @@ impl PageStorageTreeNode { /// Returns the index of a child that can store the page token. /// /// Invariant: children have been created + #[rr::skip] + #[rr::args(#raw "#(-[ #maybe_tok; #available_sz; #children])", "base_addr", "node_sz", "#tok")] + /// Precondition: The children need to have been initialized. + #[rr::requires("length children = page_size_multiplier node_sz")] + /// Precondition: There is a smaller page size. + #[rr::requires("page_size_smaller node_sz = Some child_node_size")] + /// Precondition: The token's page size is smaller than this node's size. + #[rr::requires("tok.(page_sz) ≤ₚ child_node_size")] + /// Precondition: The token is within the range of this node. + #[rr::requires("page_within_range base_addr node_sz tok")] + #[rr::exists("i")] + #[rr::returns("i")] + #[rr::ensures("is_Some (children !! i)")] + // TODO: the token is in the range of the child node. + // TODO: does not work at this level of abstraction. Use a raw specification. 
fn calculate_child_index(&self, this_node_base_address: usize, this_node_page_size: PageSize, page_token: &Page) -> usize { assert!(this_node_page_size > *page_token.size()); let index = (page_token.start_address() - this_node_base_address) / this_node_page_size.smaller().unwrap().in_bytes(); @@ -331,6 +526,11 @@ impl PageStorageTreeNode { /// Returns the base address and the page size of the child at the given index /// /// Invariant: children have been created + #[rr::params("node", "child_index", "child_node_size")] + #[rr::args("#node", "node.(base_address)", "node.(max_node_size)", "child_index")] + #[rr::requires("node.(children_initialized)")] + #[rr::requires("page_size_smaller node.(max_node_size) = Some child_node_size")] + #[rr::returns("-[child_base_address node.(base_address) child_node_size child_index; child_node_size] ")] fn child_address_and_size(&self, this_node_base_address: usize, this_node_page_size: PageSize, index: usize) -> (usize, PageSize) { assert!(index < self.children.len()); assert!(this_node_page_size.smaller().is_some()); diff --git a/security-monitor/src/core/page_allocator/page.rs b/security-monitor/src/core/page_allocator/page.rs index d46d277..6b22b38 100644 --- a/security-monitor/src/core/page_allocator/page.rs +++ b/security-monitor/src/core/page_allocator/page.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2023 IBM Corporation // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 +#![rr::import("ace.theories.page_allocator", "page")] use crate::core::architecture::PageSize; use crate::core::control_data::{DigestType, MeasurementDigest}; use crate::core::memory_layout::{ConfidentialMemoryAddress, MemoryLayout, NonConfidentialMemoryAddress}; @@ -12,19 +13,51 @@ use core::ops::Range; pub trait PageState {} -pub enum UnAllocated {} -pub enum Allocated {} +pub struct UnAllocated {} +pub struct Allocated {} impl PageState for UnAllocated {} impl PageState for Allocated {} #[derive(Debug)] +/// Specification: +/// Mathematically, we model a `Page` as a triple `(page_loc, page_sz, page_val)`, where: +/// - `page_loc` is the start address in memory, +/// - `page_sz` is the size of the page, +/// - and `page_val` is the sequence of bytes currently stored in the page. +#[rr::refined_by("p" : "page")] + +/// Invariant: As an invariant, a `Page` *exclusively owns* this memory region, and ascribes the value `v` to it. +#[rr::invariant(#type "p.(page_loc)" : "<#> p.(page_val)" @ "array_t (int usize_t) (page_size_in_words_nat p.(page_sz))")] + +/// Invariant: The page is well-formed. +#[rr::invariant("page_wf p")] + +/// We require the page to be in this bounded memory region. +#[rr::invariant("(page_end_loc p).2 ≤ MAX_PAGE_ADDR")] + +/// We require the memory layout to have been initialized. +#[rr::context("onceG Σ memory_layout")] +#[rr::exists("MEMORY_CONFIG")] +/// Invariant: The MEMORY_LAYOUT Once instance has been initialized to MEMORY_CONFIG. +#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] +/// Invariant: ...and according to that layout, this page resides in confidential memory. +#[rr::invariant("MEMORY_CONFIG.(conf_start).2 ≤ p.(page_loc).2")] +#[rr::invariant("p.(page_loc).2 + (page_size_in_bytes_nat p.(page_sz)) < MEMORY_CONFIG.(conf_end).2")] pub struct Page { + /// Specification: the `address` has mathematical value `l`. + #[rr::field("p.(page_loc)")] address: ConfidentialMemoryAddress, + /// Specification: the `size` has mathematical value `sz`. 
+ #[rr::field("p.(page_sz)")] size: PageSize, + /// Specification: the `_marker` has no relevance for the verification. + #[rr::field("tt")] _marker: PhantomData, } +#[rr::context("onceG Σ memory_layout")] +#[rr::context("onceG Σ gname")] impl Page { /// Creates a page token at the given address in the confidential memory. /// @@ -32,10 +65,39 @@ impl Page { /// /// The caller must guarantee that he owns the memory region defined by the address and size /// and his ownership is given to the page token. + /// + /// # Specification: + #[rr::params("l", "sz", "v", "MEMORY_CONFIG")] + /// The mathematical values of the two arguments are a memory location `l` and a size `sz`. + #[rr::args("l", "sz")] + + /// Precondition: We require ownership of the memory region starting at `l` for size `sz`. + /// Moreover, `l` needs to be properly aligned for a page of size `sz`, and contain valid integers. + #[rr::requires(#type "l" : "<#> v" @ "array_t (int usize_t) (page_size_in_words_nat sz)")] + + /// Precondition: The page needs to be sufficiently aligned. + #[rr::requires("l `aligned_to` (page_size_align sz)")] + + /// Precondition: The page is located in a bounded memory region. + #[rr::requires("l.2 + page_size_in_bytes_Z sz ≤ MAX_PAGE_ADDR")] + + /// Precondition: The memory layout is initialized. + #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + /// Precondition: The page is entirely contained in the confidential memory range. + #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ l.2")] + #[rr::requires("l.2 + (page_size_in_bytes_nat sz) < MEMORY_CONFIG.(conf_end).2")] + /// Then, we get a properly initialized page starting at `l` of size `sz` with some value `v`. + #[rr::returns("mk_page l sz v")] pub(super) unsafe fn init(address: ConfidentialMemoryAddress, size: PageSize) -> Self { Self { address, size, _marker: PhantomData } } + /// Specification: + #[rr::only_spec] + #[rr::params("p")] + #[rr::args("p")] + /// We return a page starting at `l` with size `sz`, but with all bytes initialized to zero. + #[rr::returns("mk_page p.(page_loc) p.(page_sz) (zero_page p.(page_sz))")] pub fn zeroize(mut self) -> Page { self.clear(); Page { address: self.address, size: self.size, _marker: PhantomData } @@ -43,6 +105,20 @@ impl Page { /// Moves a page to the Allocated state after filling its content with the /// content of a page located in the non-confidential memory. + #[rr::only_spec] + #[rr::params("p", "l2", "v2", "MEMORY_CONFIG")] + #[rr::args("p", "l2")] + /// Precondition: We need to know the current memory layout. + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + /// Precondition: The region we are copying from is in non-confidential memory. + #[rr::requires("MEMORY_CONFIG.(non_conf_start).2 ≤ l2.2")] + #[rr::requires("l2.2 + page_size_in_bytes_Z p.(page_sz) ≤ MEMORY_CONFIG.(non_conf_end).2")] + /// Precondition: We require ownership over the memory region. + #[rr::requires(#type "l2" : "<#> v2" @ "array_t (int usize_t) (page_size_in_words_nat p.(page_sz))")] + /// Postcondition: We return ownership over the memory region. + #[rr::ensures(#type "l2" : "<#> v2" @ "array_t (int usize_t) (page_size_in_words_nat p.(page_sz))")] + /// Postcondition: We get a correctly initialized page token with the copied contents. 
+ #[rr::returns("Ok(#(mk_page p.(page_loc) p.(page_sz) v2))")] pub fn copy_from_non_confidential_memory(mut self, mut address: NonConfidentialMemoryAddress) -> Result, Error> { self.offsets().into_iter().try_for_each(|offset_in_bytes| { let non_confidential_address = MemoryLayout::read() @@ -59,11 +135,19 @@ impl Page { /// Returns a collection of all smaller pages that fit within the current page and /// are correctly aligned. If this page is the smallest page (4KiB for RISC-V), then /// the same page is returned. + #[rr::only_spec] + #[rr::params("p", "x" : "memory_layout")] + #[rr::args("p")] + /// Precondition: The memory layout needs to have been initialized. + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some x)")] + /// Postcondition: We get the subdivided pages. + #[rr::returns("<#> subdivide_page p")] pub fn divide(mut self) -> Vec> { - let memory_layout = MemoryLayout::read(); let smaller_page_size = self.size.smaller().unwrap_or(self.size); let number_of_smaller_pages = self.size.in_bytes() / smaller_page_size.in_bytes(); let page_end = self.end_address_ptr(); + // NOTE: this needs the invariant to already be open + let memory_layout = MemoryLayout::read(); (0..number_of_smaller_pages) .map(|i| { let offset_in_bytes = i * smaller_page_size.in_bytes(); @@ -96,33 +180,18 @@ impl Page { } } +#[rr::context("onceG Σ memory_layout")] impl Page { /// Clears the entire memory content by writing 0s to it and then converts the Page from Allocated to UnAllocated so it can be returned /// to the page allocator. + #[rr::trust_me] + #[rr::params("p")] + #[rr::args("p")] + #[rr::returns("mk_page p.(page_loc) p.(page_sz) (zero_page p.(page_sz))")] pub fn deallocate(mut self) -> Page { self.clear(); Page { address: self.address, size: self.size, _marker: PhantomData } } -} - -impl Page { - pub const ENTRY_SIZE: usize = mem::size_of::(); - - pub fn address(&self) -> &ConfidentialMemoryAddress { - &self.address - } - - pub fn start_address(&self) -> usize { - self.address.as_usize() - } - - pub fn end_address(&self) -> usize { - self.address.as_usize() + self.size.in_bytes() - } - - pub fn size(&self) -> &PageSize { - &self.size - } /// Reads data of size `size_of::` from a page at a given offset. Error is returned /// when an offset that exceeds page size is passed as an argument. @@ -132,18 +201,77 @@ impl Page { /// * `offset_in_bytes` is an offset from the beginning of the page to which a chunk of data /// will be read from the memory. This offset must be a multiply of size_of::(usize) and be /// within the page address range, otherwise an Error is returned. + /// Specification: + #[rr::params("p", "off")] + #[rr::args("#p", "off")] + /// Precondition: the offset needs to be divisible by the size of usize. + #[rr::requires("H_off" : "(ly_size usize_t | off)%Z")] + /// Precondition: we need to be able to fit a usize at the offset and not exceed the page bounds + #[rr::requires("H_sz" : "(off + ly_size usize_t ≤ page_size_in_bytes_Z p.(page_sz))%Z")] + /// Postcondition: there exists some value `x`... + #[rr::exists("x" : "Z", "off'" : "nat")] + /// ...where off is a multiple of usize + #[rr::ensures("(off = off' * ly_size usize_t)%Z")] + /// ...that has been read from the byte sequence `v` at offset `off` + #[rr::ensures("p.(page_val) !! 
off' = Some x")] + /// ...and we return an Ok containing the value `x` + #[rr::returns("Ok(#x)")] pub fn read(&self, offset_in_bytes: usize) -> Result { assert!(offset_in_bytes % Self::ENTRY_SIZE == 0); let data = unsafe { // Safety: below add results in a valid confidential memory address because // we ensure that it is within the page boundary and page is guaranteed to // be entirely inside the confidential memory. - let pointer = self.address.add(offset_in_bytes, self.end_address_ptr())?; + let pointer = self.address.add(offset_in_bytes, self.end_address_ptr()).unwrap(); // pointer is guaranteed to be in the range <0;self.size()-size_of::(usize)> + pointer.read_volatile() }; Ok(data) } +} + +#[rr::context("onceG Σ memory_layout")] +impl Page { + pub const ENTRY_SIZE: usize = mem::size_of::(); + + #[rr::params("p")] + #[rr::args("#p")] + #[rr::returns("#p.(page_loc)")] + pub fn address(&self) -> &ConfidentialMemoryAddress { + &self.address + } + + #[rr::params("p")] + #[rr::args("#p")] + #[rr::returns("p.(page_loc).2")] + pub fn start_address(&self) -> usize { + self.address.as_usize() + } + + #[rr::params("p")] + #[rr::args("#p")] + #[rr::returns("p.(page_loc).2 + page_size_in_bytes_Z p.(page_sz)")] + pub fn end_address(&self) -> usize { + self.address.as_usize() + self.size.in_bytes() + } + + // NOTE: round-trip casts are difficult to verify, need support in RefinedRust + #[rr::only_spec] + #[rr::params("l", "sz")] + #[rr::args(#raw "#(-[#l; #sz; #tt])")] + #[rr::returns("l +ₗ page_size_in_bytes_Z sz")] + pub fn end_address_ptr(&self) -> *const usize { + // TODO: ideally, use strict-provenance API + self.end_address() as *const usize + } + + #[rr::params("p")] + #[rr::args("#p")] + #[rr::returns("#(p.(page_sz))")] + pub fn size(&self) -> &PageSize { + &self.size + } /// Writes data to a page at a given offset. Error is returned if an invalid offset was passed /// as an argument. @@ -153,13 +281,27 @@ impl Page { /// * `offset_in_bytes` is an offset from the beginning of the page to which a chunk of data /// will be written to the memory. This offset must be a multiply of size_of::(usize) and be /// within the page address range, otherwise an Error is returned. + /// Specification: + #[rr::only_spec] + #[rr::params("p", "off", "γ", "v2")] + #[rr::args("(#p, γ)", "off", "v2")] + /// Precondition: the offset needs to be divisible by the size of usize. + #[rr::requires("(ly_size usize_t | off)%Z")] + /// Precondition: we need to be able to fit a usize at the offset and not exceed the page bounds + #[rr::requires("(off + ly_size usize_t ≤ page_size_in_bytes_Z p.(page_sz))%Z")] + #[rr::exists("off'" : "Z")] + /// Postcondition: off is a multiple of usize + #[rr::ensures("off = (off' * ly_size usize_t)%Z")] + /// Postcondition: self has been updated to contain the value `v2` at offset `off` + #[rr::observe("γ": "mk_page p.(page_loc) p.(page_sz) (<[Z.to_nat off' := v2]> p.(page_val))")] + #[rr::returns("Ok(#())")] pub fn write(&mut self, offset_in_bytes: usize, value: usize) -> Result<(), Error> { ensure!(offset_in_bytes % Self::ENTRY_SIZE == 0, Error::AddressNotAligned())?; unsafe { // Safety: below add results in a valid confidential memory address because // we ensure that it is within the page boundary and page is guaranteed to // be entirely inside the confidential memory. 
- let pointer = self.address.add(offset_in_bytes, self.end_address_ptr())?; + let pointer = self.address.add(offset_in_bytes, self.end_address_ptr()).unwrap(); // pointer is guaranteed to be in the range <0;self.size()-size_of::(usize)> pointer.write_volatile(value); }; @@ -179,18 +321,36 @@ impl Page { } /// Returns all usize-aligned offsets within the page. + #[rr::skip] + #[rr::params("p")] + #[rr::args("#p")] + // the values that the iterator will yield? + #[rr::returns("step_list 0 (ly_size usize_t) (page_size_in_bytes_nat p.(page_sz))")] pub fn offsets(&self) -> core::iter::StepBy> { (0..self.size.in_bytes()).step_by(Self::ENTRY_SIZE) } - pub fn end_address_ptr(&self) -> *const usize { - self.end_address() as *const usize - } - + #[rr::only_spec] + #[rr::params("p", "γ")] + #[rr::args("(#p, γ)")] + /// Postcondition: The page has been zeroized. + #[rr::observe("γ": "mk_page p.(page_loc) p.(page_sz) (zero_page p.(page_sz))")] fn clear(&mut self) { // Safety: below unwrap() is fine because we iterate over page's offsets and thus always // request a write to an offset within the page. - self.offsets().for_each(|offset_in_bytes| self.write(offset_in_bytes, 0).unwrap()); + self.offsets().for_each( + #[rr::trust_me] + #[rr::params("off", "p")] + #[rr::args("off")] + // this should be dispatched by knowing that each argument is an element of the + // iterator, i.e. be implied by what for_each can guarantee + #[rr::requires("(ly_size usize_t | off)%Z")] + #[rr::requires("(off + ly_size usize_t ≤ page_size_in_bytes_Z p.(page_sz))%Z")] + #[rr::exists("off'")] + #[rr::ensures("off = (off' * ly_size usize_t)%Z")] + #[rr::capture("self" : "p" -> "mk_page p.(page_loc) p.(page_sz) (<[Z.to_nat off' := 0]> p.(page_val))")] + |offset_in_bytes| self.write(offset_in_bytes, 0).unwrap(), + ); } } diff --git a/security-monitor/src/lib.rs b/security-monitor/src/lib.rs index 95241f7..44ebcc7 100644 --- a/security-monitor/src/lib.rs +++ b/security-monitor/src/lib.rs @@ -17,8 +17,16 @@ // used for formal verification framework (RefinedRust annotations) register_tool, custom_inner_attributes, + stmt_expr_attributes, )] +// RefinedRust configuration #![register_tool(rr)] +#![rr::coq_prefix("sm")] +#![rr::include("spin")] +#![rr::include("alloc")] +#![rr::include("vec")] +#![rr::include("option")] +#![rr::include("result")] extern crate alloc; diff --git a/verification/.gitignore b/verification/.gitignore index 797a628..4ee74ed 100644 --- a/verification/.gitignore +++ b/verification/.gitignore @@ -3,5 +3,5 @@ _build log rustc-ice-* refinedrust -generated_code generated_code.bak +_CoqProject diff --git a/verification/Makefile b/verification/Makefile index c151904..d0437ee 100644 --- a/verification/Makefile +++ b/verification/Makefile @@ -8,6 +8,8 @@ MAKEFILE_SOURCE_DIR := $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) REFINEDRUST_CARGO ?= cargo-refinedrust REFINEDRUST_RUSTC ?= refinedrust-rustc +REFINEDRUST_GENERATED_DIR ?= generated_code + # TODO: currently, this builds in-tree. # It would be good if we could build in the ACE_DIR # Main Problem: generating the RefinedRust files there. @@ -18,16 +20,34 @@ echo_toolchain: @echo "Using RefinedRust toolchain located at $(REFINEDRUST_CARGO)" backup_build_artifacts: - rm -rf generated_code.bak - @[ ! -d generated_code ] || (echo "Backing up generated files to generated_code.bak" && mv generated_code generated_code.bak) - rm -rf generated_code + rm -rf $(REFINEDRUST_GENERATED_DIR).bak + @[ ! 
-d $(REFINEDRUST_GENERATED_DIR) ] || (echo "Backing up generated files to $(REFINEDRUST_GENERATED_DIR).bak" && cp -r $(REFINEDRUST_GENERATED_DIR) $(REFINEDRUST_GENERATED_DIR).bak) generate: echo_toolchain backup_build_artifacts - RR_OUTPUT_DIR=$(MAKEFILE_SOURCE_DIR)/generated_code PATH="$(RISCV_GNU_TOOLCHAIN_WORK_DIR)/bin:$(PATH)" ACE_DIR=$(ACE_DIR) CROSS_COMPILE=$(CROSS_COMPILE) PLATFORM_RISCV_XLEN=$(PLATFORM_RISCV_XLEN) PLATFORM_RISCV_ISA=$(PLATFORM_RISCV_ISA) PLATFORM_RISCV_ABI=$(PLATFORM_RISCV_ABI) $(MAKE) -C ../security-monitor refinedrust + RR_CONFIG=$(MAKEFILE_SOURCE_DIR)/RefinedRust.toml PATH="$(RISCV_GNU_TOOLCHAIN_WORK_DIR)/bin:$(PATH)" ACE_DIR=$(ACE_DIR) CROSS_COMPILE=$(CROSS_COMPILE) PLATFORM_RISCV_XLEN=$(PLATFORM_RISCV_XLEN) PLATFORM_RISCV_ISA=$(PLATFORM_RISCV_ISA) PLATFORM_RISCV_ABI=$(PLATFORM_RISCV_ABI) $(MAKE) -C ../security-monitor refinedrust verify: generate @dune build --display short + @echo "✅︎ Verification succeeded" + clean: rm -rf $(ACE_DIR) # TODO: clean generated proof files + + +# generate _CoqProject file +define COQPROJECT_BODY +## theories for manually proved things +-Q _build/default/theories/base/ ace.theories.base +-Q _build/default/theories/page_allocator/ ace.theories.page_allocator +-Q _build/default/theories/page_table/ ace.theories.page_table +-Q _build/default/theories/memory_layout ace.theories.memory_layout + +# include RefinedRust-generated theories here +-Q _build/default/$(REFINEDRUST_GENERATED_DIR)/ace sm.ace +endef +export COQPROJECT_BODY + +coqproject: + @echo "$$COQPROJECT_BODY" > _CoqProject diff --git a/verification/RefinedRust.toml b/verification/RefinedRust.toml index 811a0d4..50b1335 100644 --- a/verification/RefinedRust.toml +++ b/verification/RefinedRust.toml @@ -1,5 +1,10 @@ dump_borrowck_info=false -output_dir="./generated_code" log_dir = "./log" +output_dir = "./rust_proofs" shims = "rr_shims.json" run_check = false +admit_proofs = true +skip_unsupported_features = false +extra_specs = "./extra_specs.v" +generate_dune_project = false +lib_load_paths = ["./refinedrust/stdlib/"] diff --git a/verification/_CoqProject b/verification/_CoqProject deleted file mode 100644 index 74ddadb..0000000 --- a/verification/_CoqProject +++ /dev/null @@ -1,11 +0,0 @@ -# We sometimes want to locally override notation, and there is no good way to do that with scopes. --arg -w -arg -notation-overridden -# Cannot use non-canonical projections as it causes massive unification failures -# (https://github.com/coq/coq/issues/6294). --arg -w -arg -redundant-canonical-projection - -## example theory for manually proved things -# -Q _build/default/theories/theory ace.theory - -# include RefinedRust-generated theories here --Q _build/default/rr_output/security_monitor ace.security_monitor.rr diff --git a/verification/dune b/verification/dune index 391d6f9..cdc55b4 100644 --- a/verification/dune +++ b/verification/dune @@ -1,2 +1,2 @@ ; Add project-wide flags here. -(dirs :standard \ refinedrust generated_code.bak) +(dirs :standard \ generated_code.bak) diff --git a/verification/extra_specs.v b/verification/extra_specs.v new file mode 100644 index 0000000..e69de29 diff --git a/verification/readme.md b/verification/readme.md index c9f73a7..fceecd8 100644 --- a/verification/readme.md +++ b/verification/readme.md @@ -3,6 +3,139 @@ This directory contains work-in-progress on verifying the security monitor. Currently, the verification is in early stages. 
-This document will be updated with details on the verified components and assurances as the verification effort progresses. +We use the tool [RefinedRust](https://plv.mpi-sws.org/refinedrust/) in the [Coq proof assistant](https://coq.inria.fr/) for verification. -For now, the [setup](setup.md) document describes how to setup the verification toolchain on your system. +## Architecture +Our approach to proving code in the security monitor is to add verification annotations to the Rust source code. +These include invariants as well as pre- and postconditions. + +Afterwards, RefinedRust translates the Rust source code and annotations into a representation in the Coq proof assistant. +This is initiated by running `make verify` in the root directory of the repository. + +The generated Coq code is placed in the [`verification/rust_proofs/ace`](/verification/rust_proofs/ace) folder. +There are two subfolders, [`generated`](/verification/rust_proofs/ace/generated) and [`proofs`](/verification/rust_proofs/ace/proofs). +The [`generated`](/verification/rust_proofs/ace/generated) subfolder contains the automatically generated model of the Rust code, the translated specifications, and proof templates. +It is re-generated on every run of `make verify` and thus should not be modified manually. +The [`proofs`](/verification/rust_proofs/ace/proofs) subfolder contains the proofs for the individual functions. +These files can be modified because they are not touched by RefinedRust after their initial creation. +If the default RefinedRust proof automation does not succeed in completing the proof, (correct) manual proof steps will need to be added to complete the proof of the function. + +In addition to the files managed by RefinedRust, the [`verification/theories`](/verification/theories) folder contains definitions and lemmas that are useful for specifying the Rust code. +These files are manually created and written, and imported into RefinedRust through annotations on the Rust code. + +### Example: Architecture of the Page Token verification + +| | Location | +|---------------------|----------| +| Rust source file | [`security_monitor/src/core/page_allocator/page.rs`](/security-monitor/src/core/page_allocator/page.rs) | +| Extra Coq theories | [`verification/theories/memory_tracker/page/page_extra.v`](/verification/theories/memory_tracker/page/page_extra.v) | +| Generated files | | +| \|- generated code | [`verification/rust_proofs/ace/generated/rust_proofs_ace.v`](/verification/rust_proofs/ace/generated/rust_proofs_ace.v) | +| \|- generated specs | [`verification/rust_proofs/ace/generated/generated_specs_ace.v`](/verification/rust_proofs/ace/generated/generated_specs_ace.v) | +| \|- proofs | [`verification/rust_proofs/ace/proofs`](/verification/rust_proofs/ace/proofs) | + +As a more concrete example, let us consider the `Page` structure and the `Page::init` function. + +For the `Page` structure, RefinedRust generates the following code: +1. in generated code: the definition `Page_sls` describing the layout of the `Page` struct +2. in generated specs: the definition `Page_ty` and `Page_inv_t` describing the `Page` struct type and the type containing the invariant specified through the annotations on the struct +The definition of the invariant on `Page` uses some extra (manually written) Coq theories, for instance the definition of the `page_size` Coq type. + +For the `Page::read` function, RefinedRust generates the following code: +1. 
in generated code: the definition `core_page_allocator_page_Page_core_page_allocator_page_T_read_def` containing the source code of the function translated into RefinedRust's operational model Radium +2. in generated specs: the definition `type_of_core_page_allocator_page_Page_core_page_allocator_page_T_read` contains the annotated specification of the function translated into RefinedRust's refinement type system +3. the file `generated_template_core_page_allocator_page_Page_core_page_allocator_page_T_read.v` in [`verification/rust_proofs/ace/generated/`](/verification/rust_proofs/ace/generated) contains the lemma statement that RefinedRust will prove for the function and a proof script for function-specific parts of the proof. +4. the file [`proof_core_page_allocator_page_Page_core_page_allocator_page_T_read.v`](/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_T_read.v) in [`verification/rust_proofs/ace/proofs/`](/verification/rust_proofs/ace/proofs) contains the proof of the lemma for the function. + The default proof that RefinedRust generates will use the proof script from step 3 and then call into RefinedRust's generic automation. + +## Getting Started +The [setup](setup.md) document details how to set up the verification toolchain on your system. + +Here are some pointers to get you started with verifying more code in our setup: +- the [RefinedRust paper](https://dl.acm.org/doi/10.1145/3656422) explains the basics of the verification system and methodology with a few simple examples +- the [RefinedRust tutorial](https://gitlab.mpi-sws.org/lgaeher/refinedrust-dev/-/blob/main/docs/tutorial.md?ref_type=heads) explains how to use RefinedRust on your own using a simple example +- the [RefinedRust documentation](https://gitlab.mpi-sws.org/lgaeher/refinedrust-dev/-/blob/main/docs/SPEC_FORMAT.md?ref_type=heads) provides technical documentation of the available annotations +- the page token implementation is documented extensively, so you may learn by example. + +## Status + +The following table documents the verification status of the security monitor's individual modules. +Some less interesting support modules are not included. + +The Rust source path is relative to [`security-monitor/src`](/security-monitor/src/). + +| Module | Rust source | Status | Remarks | +|----------------------------|-------------|----------------------------|---------| +| Memory isolation config | | Specified, partly verified | | +| \|- Page token | [`core/page_allocator/page.rs`](/security-monitor/src/core/page_allocator/page.rs) | Specified, partly verified | | +| \|- Page allocator | [`core/page_allocator/allocator.rs`](/security-monitor/src/core/page_allocator/allocator.rs) | Specified, not verified | | +| \|- Page table encoding | [`core/architecture/riscv/mmu`](/security-monitor/src/core/architecture/riscv/mmu) | Specified, not verified | | +| Initialization | [`core/initialization`](/security-monitor/src/core/initialization) | Specified, partly verified | | +| VM Interactions | | Unspecified | | +| \|- Interface | [`core/control_data`](/security-monitor/src/core/control_data) | | | +| \|- Confidential flow | [`confidential_flow`](/security-monitor/src/confidential_flow) | | | +| \|- Non-confidential flow | [`non_confidential_flow`](/security-monitor/src/non_confidential_flow) | | | + +### RefinedRust proof annotations + +Rust functions are only translated and verified by RefinedRust if RefinedRust annotations (`#[rr::...]`) are attached to them.
+If a function is annotated, the function is verified against that specification, except if one of the following opt-out annotations is present: +- `#[rr::only_spec]`: only the specification is generated, but the code is not translated and not verified +- `#[rr::trust_me]`: the specification and code are generated, but the code is not verified against the specification + +These opt-out annotations make it possible to progressively cover the code with specifications while deferring the verification work itself. Eventually, a fully verified security monitor would contain none of the opt-out annotations, and RefinedRust would verify every annotated function against its specification. (A minimal, hypothetical sketch of the annotation style is included at the end of this document.) + +## Guarantees + +### Verification goal + +Currently, we aim to verify memory safety (i.e., the absence of null dereferences, data races, use-after-free, etc.) and functional correctness (i.e., the code does what it is functionally intended to do) of the security monitor implementation. + +In the future, we plan to verify (relaxed) non-interference, in order to show that confidential VMs are correctly isolated from each other and from the hypervisor. +This will build on and directly integrate with our current verification of memory safety and functional correctness. + + +### Non-goals + +A fully verified virtualization stack requires a number of components; this project focuses on only one of those necessary components. +There are several efforts which are complementary to the aims of this project: + +**Software** +* Hypervisor verification: While our security monitor architecture can ensure confidentiality of VMs without trusting the hypervisor, VMs may still need to communicate with the hypervisor (e.g. for IO). One may want to prove that the hypervisor is still trustworthy or that it responds correctly to hypercalls. + Our architecture assumes that the hypervisor retains control over thread scheduling, so it controls the availability of confidential VMs. One might try to prove the correctness of the hypervisor in order to get availability guarantees. +* VM verification: In order to actually run a secure workload end-to-end, one would have to verify the code of confidential VMs protected by the security monitor. +* Low-level firmware/bootloader verification: Our security monitor currently builds on OpenSBI, which we trust. + In future work, we would like to deprivilege firmware (see discussions in the RISC-V community on M-mode partitioning or M-mode lightweight isolation) or reimplement and prove the minimal set of required functionalities of the M-mode firmware (e.g., OpenSBI). + +**Hardware** +* Verification of ISA security: Our security monitor relies on hardware components, such as RISC-V physical memory protection (PMP) or the memory management unit (MMU). We have to trust that these mechanisms provided by the RISC-V ISA are actually secure and do not allow unintentional information flow when correctly configured. +* Microarchitectural verification: We need to rely on the hardware correctly implementing the ISA and on it having no side channels that allow information flow beyond what is specified in the ISA. +* Designing secure hardware + +**Verification technology** +* Compiler verification: We verify the Rust code against a model of the Rust semantics, but what actually runs on the hardware is the compiled assembly code. To transfer our verification results to the compiled code, we need a verified compiler for Rust. +* Assembly verification: Currently, we do not verify the small parts of the code written in assembly.
+* Multi-language verification: Combining verification results for multiple languages such as Rust and Assembly is a challenging problem. + +### Rust model +RefinedRust verifies the Rust code against a model of the Rust semantics, specialized to 64-bit pointers (i.e., `usize` is 64 bits wide) and little-endian byte order. + +As there is no authoritative Rust semantics yet, RefinedRust's model necessarily has inaccuracies with respect to how the Rust compiler behaves. +The most important limitations of RefinedRust's model include the following aspects of the Rust semantics: +- weak memory semantics (i.e., RefinedRust assumes sequential consistency) + +### Assembly semantics +In the current stage of the project, we do not yet verify the inline Assembly semantics and specifics of the RISC-V core (e.g. when switching execution between a VM and the security monitor). +Accurate verification of multi-language programs and verification of system software against authoritative ISA semantics are open problems under active research. + +### Trusted Computing Base +RefinedRust is a foundational verification tool (i.e., the proofs are done in a proof assistant with a small proof kernel; concretely, the Coq proof assistant), and as such its trusted computing base is fairly small. +The risk of unintentional soundness bugs is much lower compared to tools relying on, e.g., SMT solvers. + +Nevertheless, there are some code parts which have to be trusted: +- the formalization of Rust's operational semantics in RefinedRust +- the statement of RefinedRust's top-level soundness theorem +- the translation from Rust's MIR intermediate representation to RefinedRust's Coq code + +If you want to carry the verification results over to the generated machine code, you additionally have to trust the Rust compiler's translation from MIR to machine code. +On the other hand, if you want to validate that your surface-level Rust code has the correct behavior, you additionally have to trust the compiler's translation from surface-level Rust to MIR.
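+
+To make the annotation style described above concrete, here is a minimal, hypothetical sketch of an annotated helper function. The function, its name, and the Coq-side expressions (such as the `MaxInt usize_t` bound) are illustrative only and not part of the security monitor; the crate-level `#![register_tool(rr)]` setup from `src/lib.rs` is assumed:
+
+```rust
+/// Illustrative only: `n` and `m` are the mathematical (Coq-level) integers
+/// refining the two `usize` arguments.
+#[rr::params("n", "m")]
+#[rr::args("n", "m")]
+/// Precondition: the sum fits into a usize, so the addition cannot overflow.
+#[rr::requires("n + m ≤ MaxInt usize_t")]
+/// Postcondition: the result is exactly the mathematical sum.
+#[rr::returns("n + m")]
+fn total(a: usize, b: usize) -> usize {
+    a + b
+}
+```
+
+Adding `#[rr::only_spec]` to this function would keep the specification available to callers without translating or verifying the body, while `#[rr::trust_me]` would translate the body but skip its verification.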
diff --git a/verification/rr_shims.json b/verification/rr_shims.json index 43a96a6..650c098 100644 --- a/verification/rr_shims.json +++ b/verification/rr_shims.json @@ -1,44 +1,44 @@ [ { - "path": ["std", "mem", "swap"], + "path": ["core", "mem", "swap"], "kind": "function", - "name": "std_mem_swap", + "name": "core_mem_swap", "spec": "type_of_mem_swap" - }, - { - "path": ["std", "boxed", "Box", "new"], - "kind": "method", - "name": "box_new", - "spec": "type_of_box_new" }, { - "path": ["std", "mem", "size_of"], + "path": ["core", "mem", "size_of"], "kind": "function", - "name": "std_mem_size_of", + "name": "core_mem_size_of", "spec": "type_of_mem_size_of" }, - { - "path": ["std", "ptr", "read"], + { + "path": ["core", "ptr", "read"], "kind": "function", - "name": "std_ptr_read", + "name": "core_ptr_read", "spec": "type_of_ptr_read" }, - { - "path": ["std", "ptr", "write"], + { + "path": ["core", "ptr", "read_volatile"], + "kind": "function", + "name": "core_ptr_read", + "spec": "type_of_ptr_read" + }, + { + "path": ["core", "ptr", "write"], "kind": "function", - "name": "std_ptr_write", + "name": "core_ptr_write", "spec": "type_of_ptr_write" }, - { - "path": ["std", "ptr", "copy"], + { + "path": ["core", "ptr", "write_volatile"], "kind": "function", - "name": "std_ptr_copy", - "spec": "type_of_ptr_copy" + "name": "core_ptr_write", + "spec": "type_of_ptr_write" }, - { - "path": ["std", "option", "Option", "unwrap"], - "kind": "method", - "name": "std_option_Option_unwrap_def", - "spec": "type_of_std_option_Option_unwrap" + { + "path": ["core", "ptr", "copy"], + "kind": "function", + "name": "core_ptr_copy", + "spec": "type_of_ptr_copy" } ] diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v new file mode 100644 index 0000000..4d72082 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_architecture_riscv_mmu_page_size_PageSize_largest. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. + +Lemma core_architecture_riscv_mmu_page_size_PageSize_largest_proof (π : thread_id) : + core_architecture_riscv_mmu_page_size_PageSize_largest_lemma π. +Proof. + core_architecture_riscv_mmu_page_size_PageSize_largest_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v new file mode 100644 index 0000000..7f4d65c --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_architecture_riscv_mmu_page_size_PageSize_smallest. + +Set Default Proof Using "Type". + +Section proof. 
+Context `{!refinedrustGS Σ}. + +Lemma core_architecture_riscv_mmu_page_size_PageSize_smallest_proof (π : thread_id) : + core_architecture_riscv_mmu_page_size_PageSize_smallest_lemma π. +Proof. + core_architecture_riscv_mmu_page_size_PageSize_smallest_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v new file mode 100644 index 0000000..061025f --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v @@ -0,0 +1,26 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_memory_layout_MemoryLayout_read. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. + +Lemma core_memory_layout_MemoryLayout_read_proof (π : thread_id) : + core_memory_layout_MemoryLayout_read_lemma π. +Proof. + core_memory_layout_MemoryLayout_read_prelude. + + repeat liRStep; liShow. + liInst Hevar1 (MemoryLayout_inv_t). + liInst Hevar2 Spin_ty. + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v new file mode 100644 index 0000000..d15ce3f --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr_proof (π : thread_id) : + core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr_lemma π. +Proof. + core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v new file mode 100644 index 0000000..1a44c76 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new_proof (π : thread_id) : + core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new_lemma π. +Proof. + core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v new file mode 100644 index 0000000..bcf0131 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. + +Lemma core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr_proof (π : thread_id) : + core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr_lemma π. +Proof. + core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v new file mode 100644 index 0000000..99fbecd --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr. + +Set Default Proof Using "Type". 
+ +Section proof. +Context `{!refinedrustGS Σ}. + +Lemma core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr_proof (π : thread_id) : + core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr_lemma π. +Proof. + core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v new file mode 100644 index 0000000..632c5cd --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v @@ -0,0 +1,24 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest_proof (π : thread_id) : + core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest_lemma π. +Proof. + core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest_prelude. + + repeat liRStep; liShow. + liInst Hevar0 (core_memory_protector_mmu_page_size_PageSize_ty). + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v new file mode 100644 index 0000000..a33d90c --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_memory_protector_mmu_page_size_PageSize_smallest. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_memory_protector_mmu_page_size_PageSize_smallest_proof (π : thread_id) : + core_memory_protector_mmu_page_size_PageSize_smallest_lemma π. +Proof. + core_memory_protector_mmu_page_size_PageSize_smallest_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v new file mode 100644 index 0000000..dfa896c --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_T_address. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_page_allocator_page_Page_T_address_proof (π : thread_id) : + core_page_allocator_page_Page_T_address_lemma π. +Proof. + core_page_allocator_page_Page_T_address_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v new file mode 100644 index 0000000..2924cc0 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v @@ -0,0 +1,23 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_T_end_address. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_page_allocator_page_Page_T_end_address_proof (π : thread_id) : + core_page_allocator_page_Page_T_end_address_lemma π. +Proof. + core_page_allocator_page_Page_T_end_address_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat; solve_goal. all: shelve. } + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v new file mode 100644 index 0000000..8364b0e --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_T_size. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_page_allocator_page_Page_T_size_proof (π : thread_id) : + core_page_allocator_page_Page_T_size_lemma π. +Proof. + core_page_allocator_page_Page_T_size_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v new file mode 100644 index 0000000..9d98b65 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_T_start_address. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_page_allocator_page_Page_T_start_address_proof (π : thread_id) : + core_page_allocator_page_Page_T_start_address_lemma π. +Proof. + core_page_allocator_page_Page_T_start_address_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v new file mode 100644 index 0000000..5665f31 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. \ No newline at end of file diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v new file mode 100644 index 0000000..dfe7101 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v @@ -0,0 +1,41 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read. + +Set Default Proof Using "Type". + +Lemma bytes_per_int_usize : + bytes_per_int usize_t = bytes_per_addr. +Proof. done. Qed. + +Section proof. +Context `{!refinedrustGS Σ}. 
+Lemma core_page_allocator_page_Page_core_page_allocator_page_Allocated_read_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_Allocated_read_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_Allocated_read_prelude. + + rep <-! liRStep; liShow. + { (* accessing the element of the array for the read requires manual reasoning *) + destruct H_off as (off' & ->). + + apply_update (updateable_typed_array_access p.(page_loc) off' (IntSynType usize_t)). + rep liRStep; liShow. + liInst Hevar1 (Allocated_ty). + rep liRStep; liShow. + } + { rep liRStep; liShow. } + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { move: H_sz. + rewrite /page_size_in_bytes_Z/page_size_in_bytes_nat bytes_per_int_usize. + clear. + assert (bytes_per_addr > 0). { solve_goal. } + nia. } + { revert H_off. rewrite -Z.rem_divide; done. } + Unshelve. all: print_remaining_sidecond. +Admitted. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v new file mode 100644 index 0000000..81cc7d3 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace. +From sm.ace.generated Require Import generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init. + +Set Default Proof Using "Type". + +Section proof. +Context `{!refinedrustGS Σ}. +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to admit_proofs config flag *) +End proof. diff --git a/verification/setup.md b/verification/setup.md index c909310..5442f9b 100644 --- a/verification/setup.md +++ b/verification/setup.md @@ -31,11 +31,18 @@ opam switch link refinedrust-ace . REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/setup-coq.sh REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/install-typesystem.sh +REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/install-stdlib.sh ``` - The setup script will setup your opam switch to include all dependencies of RefinedRust. -Afterwards, we install Lithium (the proof automation engine RefinedRust uses) as well as RefinedRust itself. +Afterwards, we install RefinedRust itself and its standard library specifications. 
+ +#### Updating RefinedRust's Coq libraries +To update for a new version of RefinedRust, update your checkout and then re-run the last two commands above from the verification subfolder of this repository: +``` +REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/install-typesystem.sh +REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/install-stdlib.sh +``` ### Installing the RefinedRust frontend @@ -53,10 +60,22 @@ REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/install-frontend.sh This will install binaries `refinedrust-rustc` and `cargo-refinedrust` in your cargo path. This allows us to run `cargo refinedrust` in Rust projects. +#### Updating RefinedRust's frontend +To update for a new version of RefinedRust, update your checkout and then re-run the last command above from the verification subfolder of this repository: +``` +REFINEDRUST_ROOT=$PWD/refinedrust ./refinedrust/scripts/install-frontend.sh +``` + +### Interactive proofs +You can interactively explore the Coq code with an editor with a Coq plugin (such as Coqtail for vim, ProofGeneral for emacs, or VSCoq for VSCode). +If the Coq plugin of your choice does not provide native support for the `dune` build system, you may want to run `make coqproject` to generate a `_CoqProject` file that your editor can use to infer the project structure. + ## Verifying the code Now, we are ready to run the verification. -To that end, run +To that end, run (from the root of the repository) ``` make verify ``` + + diff --git a/verification/theories/base/base.v b/verification/theories/base/base.v new file mode 100644 index 0000000..ab34f3e --- /dev/null +++ b/verification/theories/base/base.v @@ -0,0 +1,12 @@ +From refinedrust Require Import typing. + +(** * Lemmas that should probably be upstreamed *) + +Lemma aligned_to_mul_inv l al num : + l `aligned_to` al * num → + l `aligned_to` al. +Proof. + rewrite /aligned_to. + destruct caesium_config.enforce_alignment; last done. + intros (k & Heq). intros. exists (k * num). lia. +Qed. diff --git a/verification/theories/memory_tracker/page/dune b/verification/theories/base/dune similarity index 68% rename from verification/theories/memory_tracker/page/dune rename to verification/theories/base/dune index d125907..9d2051d 100644 --- a/verification/theories/memory_tracker/page/dune +++ b/verification/theories/base/dune @@ -1,6 +1,6 @@ (coq.theory - (name ace.theories.page) + (name ace.theories.base) (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) - (synopsis "Manual proofs for ACE Security Monitor page") + (synopsis "Basic lemmas") (theories stdpp iris Ltac2 caesium lithium lrust RecordUpdate Equations refinedrust)) diff --git a/verification/theories/memory_layout/dune b/verification/theories/memory_layout/dune new file mode 100644 index 0000000..6d787a7 --- /dev/null +++ b/verification/theories/memory_layout/dune @@ -0,0 +1,6 @@ +(coq.theory + (name ace.theories.memory_layout) + (package ace) + (flags -w -notation-overridden -w -redundant-canonical-projection) + (synopsis "Manual proofs for ACE Security Monitor memory layout") + (theories stdpp iris Ltac2 caesium lithium lrust RecordUpdate Equations refinedrust)) diff --git a/verification/theories/memory_layout/memory_layout.v b/verification/theories/memory_layout/memory_layout.v new file mode 100644 index 0000000..fd2161a --- /dev/null +++ b/verification/theories/memory_layout/memory_layout.v @@ -0,0 +1,13 @@ +From refinedrust Require Import typing. 
+ +(** * Additional definitions for the security monitor's global memory layout *) + +Record memory_layout : Type := mk_memory_layout { + conf_start : loc; + conf_end : loc; + non_conf_start : loc; + non_conf_end : loc; +}. + +Global Instance memory_layout_inhabited : Inhabited memory_layout := + populate (mk_memory_layout inhabitant inhabitant inhabitant inhabitant). diff --git a/verification/theories/memory_tracker/page/page_extra.v b/verification/theories/memory_tracker/page/page_extra.v deleted file mode 100644 index a252435..0000000 --- a/verification/theories/memory_tracker/page/page_extra.v +++ /dev/null @@ -1,33 +0,0 @@ -From refinedrust Require Import typing. - -(* This reflects the page sizes in [core/mmu/page_size.rs] *) -Inductive page_size : Set := - | Size4KiB - | Size2MiB - | Size1GiB - | Size512GiB - | Size128TiB. - -Definition page_size_to_nat (sz : page_size) : nat := - match sz with - | Size4KiB => 8 * 512 - | Size2MiB => 8 * 512 * 512 - | Size1GiB => 8 * 512 * 512 * 512 - | Size512GiB => 8 * 512 * 512 * 512 - | Size128TiB => 8 * 512 * 512 * 512 * 512 * 256 - end. -Definition page_size_to_Z (sz : page_size) : Z := - page_size_to_nat sz. - -(* Pages should be aligned to the size of the page *) -Definition page_size_align_log (sz : page_size) : nat := - match sz with - | Size4KiB => 12 - | Size2MiB => 21 - | Size1GiB => 30 - | Size512GiB => 39 - | Size128TiB => 47 - end. - -Definition mk_page_layout (sz : page_size) : layout := - Layout (page_size_to_nat sz) (page_size_align_log sz). diff --git a/verification/theories/page_allocator/dune b/verification/theories/page_allocator/dune new file mode 100644 index 0000000..1583138 --- /dev/null +++ b/verification/theories/page_allocator/dune @@ -0,0 +1,6 @@ +(coq.theory + (name ace.theories.page_allocator) + (package ace) + (flags -w -notation-overridden -w -redundant-canonical-projection) + (synopsis "Manual proofs for ACE Security Monitor page allocator") + (theories stdpp iris Ltac2 caesium lithium lrust RecordUpdate Equations refinedrust ace.theories.base)) diff --git a/verification/theories/page_allocator/page.v b/verification/theories/page_allocator/page.v new file mode 100644 index 0000000..cdb2fae --- /dev/null +++ b/verification/theories/page_allocator/page.v @@ -0,0 +1,331 @@ +From refinedrust Require Import typing. +From ace.theories.base Require Import base. + +(** * Extra theories for page sizes and pages *) + +(* This reflects the page sizes in [core/architecture/riscv/mmu/page_size.rs] *) +Inductive page_size : Set := + | Size4KiB + | Size16KiB + | Size2MiB + | Size1GiB + | Size512GiB + | Size128TiB. + +Global Instance page_size_inh : Inhabited page_size. +Proof. exact (populate Size4KiB). Qed. +Global Instance page_size_eqdec : EqDecision page_size. +Proof. solve_decision. Defined. +Global Instance page_size_countable : Countable page_size. +Proof. + refine (inj_countable (λ sz, + match sz with + | Size4KiB => 0 + | Size16KiB => 1 + | Size2MiB => 2 + | Size1GiB => 3 + | Size512GiB => 4 + | Size128TiB => 5 + end) + (λ a, + match a with + | 0 => Some Size4KiB + | 1 => Some Size16KiB + | 2 => Some Size2MiB + | 3 => Some Size1GiB + | 4 => Some Size512GiB + | 5 => Some Size128TiB + | _ => None + end) _). + intros []; naive_solver. +Qed. 
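+
+(* The [Countable] instance lets [page_size] be used, e.g., as a key of stdpp's finite
+   maps and sets. As a small sanity check (illustrative only), encoding round-trips: *)
+Example page_size_encode_decode : decode (encode Size2MiB) = Some Size2MiB.
+Proof. apply decode_encode. Qed.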
+ +(** Number of 64-bit machine words in a page size *) +Definition page_size_in_words_nat (sz : page_size) : nat := + match sz with + | Size4KiB => 512 + | Size16KiB => 4 * 512 + | Size2MiB => 512 * 512 + | Size1GiB => 512 * 512 * 512 + | Size512GiB => 512 * 512 * 512 * 512 + | Size128TiB => 512 * 512 * 512 * 512 * 256 + end. + +Definition page_size_in_bytes_nat (sz : page_size) : nat := + bytes_per_addr * page_size_in_words_nat sz. +Definition page_size_in_bytes_Z (sz : page_size) : Z := + page_size_in_bytes_nat sz. + +(** The next smaller page size *) +Definition page_size_smaller (sz : page_size) : option page_size := + match sz with + | Size4KiB => None + | Size16KiB => Some Size4KiB + | Size2MiB => Some Size16KiB + | Size1GiB => Some Size2MiB + | Size512GiB => Some Size1GiB + | Size128TiB => Some Size512GiB + end. +(** The next larger page size *) +Definition page_size_larger (sz : page_size) : option page_size := + match sz with + | Size4KiB => Some Size16KiB + | Size16KiB => Some Size2MiB + | Size2MiB => Some Size1GiB + | Size1GiB => Some Size512GiB + | Size512GiB => Some Size128TiB + | Size128TiB => None + end. + +(** Pages should be aligned to the size of the page; + compute the log2 of this page's alignment *) +Definition page_size_align_log (sz : page_size) : nat := + match sz with + | Size4KiB => 12 + | Size16KiB => 14 + | Size2MiB => 21 + | Size1GiB => 30 + | Size512GiB => 39 + | Size128TiB => 47 + end. +Definition page_size_align (sz : page_size) : nat := + 2^(page_size_align_log sz). + +Lemma page_size_align_is_size sz : + (page_size_align sz = page_size_in_bytes_nat sz)%nat. +Proof. + rewrite /page_size_align/page_size_in_bytes_nat/bytes_per_addr/bytes_per_addr_log. + Local Arguments Nat.mul : simpl never. + destruct sz; simpl; lia. +Qed. + +(** The maximum address at which a page may be located (one-past-the-end address) *) +(* Sealed because it is big and will slow down Coq *) +Definition MAX_PAGE_ADDR_def : Z := + page_size_in_bytes_Z Size128TiB. +Definition MAX_PAGE_ADDR_aux : seal MAX_PAGE_ADDR_def. Proof. by eexists. Qed. +Definition MAX_PAGE_ADDR := unseal MAX_PAGE_ADDR_aux. +Definition MAX_PAGE_ADDR_unfold : MAX_PAGE_ADDR = MAX_PAGE_ADDR_def := + seal_eq MAX_PAGE_ADDR_aux. + +(** * Pages *) +(** Page containing zeroes *) +Definition zero_page (sz : page_size) : list Z := + replicate (page_size_in_words_nat sz) 0. + +(** Logical representation of a page, consisting of: + - its location in memory + - its size + - its list of words, represented as integers *) +Record page : Type := mk_page { + page_loc : loc; + page_sz : page_size; + page_val : list Z +}. +Global Instance page_inh : Inhabited page. +Proof. exact (populate (mk_page inhabitant inhabitant inhabitant)). Qed. +Global Instance page_eqdec : EqDecision page. +Proof. solve_decision. Defined. +Global Instance page_countable : Countable page. +Proof. + refine (inj_countable' (λ p, (p.(page_loc), p.(page_sz), p.(page_val))) + (λ p, mk_page p.1.1 p.1.2 p.2) _). + intros []; done. +Qed. + +(** One-past-the-end location of this page *) +Definition page_end_loc (p : page) : loc := + p.(page_loc) +ₗ (page_size_in_bytes_Z p.(page_sz)). + +(** Order on page sizes *) +Definition page_size_le (p1 p2 : page_size) := + page_size_in_words_nat p1 ≤ page_size_in_words_nat p2. +Arguments page_size_le : simpl never. + +Infix "≤ₚ" := page_size_le (at level 50). 
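+
+(* Sanity check (illustrative only): with [bytes_per_addr] = 8 bytes per machine word,
+   a [Size4KiB] page indeed spans 512 words, i.e. 4096 bytes. *)
+Example page_size_4KiB_bytes : page_size_in_bytes_nat Size4KiB = 4096%nat.
+Proof. reflexivity. Qed.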
+ +(** Well-formedness of a page *) +Definition page_wf (p : page) : Prop := + (** The value has the right size *) + page_size_in_words_nat p.(page_sz) = length p.(page_val) ∧ + (** The location has the right alignment *) + p.(page_loc) `aligned_to` page_size_align p.(page_sz). + +(** The list of [pages] forms a contiguous region of memory, one page after another. *) +Fixpoint pages_are_contiguous (pages : list page) : Prop := + match pages with + | [] => True + | pg1 :: pgs => + match pgs with + | [] => True + | pg2 :: pgs' => + pages_are_contiguous pgs ∧ + pg1.(page_loc) +ₗ (page_size_in_bytes_Z pg1.(page_sz)) = pg2.(page_loc) + end + end. +Arguments pages_are_contiguous : simpl never. + +Lemma pages_are_contiguous_cons p1 ps : + pages_are_contiguous ps → + (match head ps with | Some p2 => p2.(page_loc) = p1.(page_loc) +ₗ (page_size_in_bytes_nat p1.(page_sz)) | None => True end) → + pages_are_contiguous (p1 :: ps). +Proof. + intros Ha Hh. + rewrite /pages_are_contiguous. + destruct ps as [ | p2 ps]; first done. + simpl in *. split; done. +Qed. + + +(** How many pages of the next smaller page size are there in this page size? *) +Definition page_size_multiplier_log (sz : page_size) : nat := + match sz with + | Size4KiB => 0 + | Size16KiB => 2 + | Size2MiB => 7 + | Size1GiB => 9 + | Size512GiB => 9 + | Size128TiB => 8 + end. +Definition page_size_multiplier (sz : page_size) : nat := + 2^(page_size_multiplier_log sz). + +Lemma page_size_multiplier_size_in_words sz sz' : + page_size_smaller sz = Some sz' → + page_size_in_words_nat sz = (page_size_in_words_nat sz' * page_size_multiplier sz)%nat. +Proof. + destruct sz; first done. + all: unfold page_size_smaller; intros ?; simplify_eq. + all: unfold page_size_in_words_nat, page_size_multiplier, page_size_multiplier_log. + all: cbn [Nat.pow]; lia. +Qed. +Lemma page_size_multiplier_align_log sz sz' : + page_size_smaller sz = Some sz' → + page_size_align_log sz = (page_size_align_log sz' + page_size_multiplier_log sz)%nat. +Proof. + destruct sz; first done. + all: unfold page_size_smaller; intros ?; simplify_eq. + all: unfold page_size_align_log, page_size_multiplier, page_size_multiplier_log. + all: lia. +Qed. +Lemma page_size_multiplier_align sz sz' : + page_size_smaller sz = Some sz' → + page_size_align sz = (page_size_align sz' * page_size_multiplier sz)%nat. +Proof. + intros Ha. rewrite /page_size_align /page_size_multiplier. + rewrite -Nat.pow_add_r. + f_equiv. by apply page_size_multiplier_align_log. +Qed. + +(** Divide a page into pages of the next smaller page size *) +Definition subdivide_page (p : page) : list page := + match page_size_smaller p.(page_sz) with + | None => [p] + | Some smaller => + let count := page_size_multiplier p.(page_sz) in + let sizes := replicate count (page_size_in_words_nat smaller) in + let values := reshape sizes p.(page_val) in + zip_with (λ value num, + mk_page + (p.(page_loc) +ₗ (page_size_in_bytes_nat smaller * num)%nat) + smaller + value + ) values (seq 0 count) + end. +Arguments subdivide_page : simpl never. + +Lemma subdivide_page_length p : + length (subdivide_page p) = page_size_multiplier p.(page_sz). +Proof. + destruct p as [l sz vs]. + rewrite /subdivide_page/=. + destruct (page_size_smaller sz) as [ sz' | ] eqn:Heq; simpl; first last. + { destruct sz; done. } + + set (num := (page_size_multiplier sz)%nat). + generalize num. clear Heq num. + + unfold page_size_in_bytes_nat. intros num. + clear sz. + set (start := 0%nat). + set (words := page_size_in_words_nat sz'). 
+ replace vs with (drop (start * words) vs); last done. + generalize start. clear start => start. + + induction num as [ | num IH] in start |-*; first done. + Local Arguments Nat.mul : simpl never. simpl. f_equiv. + rewrite drop_drop. + replace (start * words + words)%nat with ((S start) * words)%nat by lia. + apply (IH (S start)). +Qed. + +Lemma subdivide_page_wf p : + page_wf p → + Forall page_wf (subdivide_page p). +Proof. + destruct p as [l sz vs]. + rewrite /subdivide_page/=. + destruct (page_size_smaller sz) as [ sz' | ] eqn:Heq; simpl; first last. + { econstructor; eauto. } + + specialize (page_size_multiplier_size_in_words _ _ Heq). + specialize (page_size_multiplier_align _ _ Heq). + set (num := (page_size_multiplier sz)%nat). + rewrite /page_wf/= => -> ->. + generalize num. clear Heq num sz. + + unfold page_size_in_bytes_nat. intros num. + set (start := 0%nat). + set (words := page_size_in_words_nat sz'). + replace vs with (drop (start * words) vs); last done. + generalize start. clear start => start [] Hlen Hal. + apply aligned_to_mul_inv in Hal. + + induction num as [ | num IH] in start, Hlen, Hal |-*; first done. + Arguments Nat.mul : simpl never. simpl. + econstructor. + - simpl. split. + { rewrite take_length -Hlen. lia. } + { subst words. eapply aligned_to_offset; first done. + rewrite page_size_align_is_size /page_size_in_bytes_nat. + rewrite Nat2Z.divide. apply Nat.divide_factor_l. } + - rewrite drop_drop. + replace (start * words + words)%nat with ((S start) * words)%nat by lia. + apply (IH (S start)); last done. + rewrite Nat.mul_succ_l. + rewrite -drop_drop drop_length -Hlen. + lia. +Qed. + +Lemma subdivide_page_is_contiguous p : + pages_are_contiguous (subdivide_page p). +Proof. + destruct p as [l sz vs]. + rewrite /subdivide_page/=. + destruct (page_size_smaller sz) as [ sz' | ] eqn:Heq; simpl; last done. + + set (num := (page_size_multiplier sz)%nat). + generalize num. clear Heq num. + + unfold page_size_in_bytes_nat. intros num. + clear sz. + + set (start := 0%nat). + set (words := page_size_in_words_nat sz'). + replace vs with (drop (start * words) vs); last done. + generalize start. clear start => start. + + induction num as [ | num IH] in start |-*; first done. + Arguments Nat.mul : simpl never. simpl. + apply pages_are_contiguous_cons. + - fold @replicate. + rewrite drop_drop. + replace (start * words + words)%nat with ((S start) * words)%nat by lia. + apply (IH (S start)). + - simpl. + fold @replicate. + destruct num; simpl; first done. + rewrite shift_loc_assoc. + f_equiv. + rewrite /page_size_in_bytes_nat. lia. +Qed. diff --git a/verification/theories/page_allocator/page_allocator.v b/verification/theories/page_allocator/page_allocator.v new file mode 100644 index 0000000..2d3e804 --- /dev/null +++ b/verification/theories/page_allocator/page_allocator.v @@ -0,0 +1,172 @@ +From refinedrust Require Import typing. +From refinedrust Require Import ghost_var_dfrac. +From ace.theories.page_allocator Require Import page. + +(** * Page allocator ghost state *) + +Record memory_region := { + mreg_start : Z; + mreg_size : nat; +}. +Definition mreg_end (mreg : memory_region) : Z := + mreg.(mreg_start) + mreg.(mreg_size). + +Class memory_regionG Σ := { + mreg_ghost_var :: ghost_varG Σ memory_region; +}. +Section page_allocator. + Context `{!typeGS Σ}. + Context `{!memory_regionG Σ}. + + Definition has_memory_region_def (γ : gname) (mreg : memory_region) := + ghost_var γ DfracDiscarded mreg. + Definition has_memory_region_aux : seal (@has_memory_region_def). Proof. 
by eexists. Qed.
+  Definition has_memory_region := has_memory_region_aux.(unseal).
+  Definition has_memory_region_eq : @has_memory_region = @has_memory_region_def := has_memory_region_aux.(seal_eq).
+
+  Lemma has_memory_region_alloc mreg :
+    ⊢ |==> ∃ γ, has_memory_region γ mreg.
+  Proof.
+    rewrite has_memory_region_eq.
+    iMod (ghost_var_alloc mreg) as (γ) "Hvar".
+    iExists γ. by iApply ghost_var_discard.
+  Qed.
+
+  Lemma has_memory_region_agree γ mreg1 mreg2 :
+    has_memory_region γ mreg1 -∗
+    has_memory_region γ mreg2 -∗
+    ⌜mreg1 = mreg2⌝.
+  Proof.
+    rewrite has_memory_region_eq.
+    iApply ghost_var_agree.
+  Qed.
+
+  Global Instance has_memory_region_pers γ mreg : Persistent (has_memory_region γ mreg).
+  Proof.
+    rewrite has_memory_region_eq.
+    apply _.
+  Qed.
+End page_allocator.
+
+(** * Page allocator invariants *)
+Inductive node_allocation_state :=
+  (** This page node is completely allocated *)
+  | PageTokenUnavailable
+  (** The page token in this node is available *)
+  | PageTokenAvailable
+  (** The page node is partially available, with a page of the given size being allocable *)
+  | PageTokenPartiallyAvailable (allocable_sz : page_size)
+.
+Global Instance node_allocation_state_eqdec : EqDecision node_allocation_state.
+Proof. solve_decision. Defined.
+
+(** Our logical representation of storage nodes *)
+Record page_storage_node : Type := mk_page_node {
+  (* The size of memory this node controls *)
+  max_node_size : page_size;
+  (* The base address of the memory region of this node *)
+  base_address : Z;
+  (* TODO: this state should not really be part of the public state *)
+  (* the current state of this node *)
+  allocation_state : node_allocation_state;
+  (* TODO: this state should not really be part of the public state *)
+  (* whether the child nodes have been initialized *)
+  children_initialized : bool;
+}.
+
+(** Compute the base address of a child node *)
+Definition child_base_address (parent_base_address : Z) (child_size : page_size) (child_index : Z) : Z :=
+  parent_base_address + (page_size_in_bytes_Z child_size * child_index).
+
+(** Assert that the base addresses and node sizes of the children match up and that the children are sorted.
+   The list of children need not be complete (i.e. it might also be empty).
+*)
+Definition page_storage_node_children_wf (parent_base_address : Z) (parent_node_size : page_size) (children : list page_storage_node) : Prop :=
+  (* If there is a smaller child node size *)
+  (∀ child_node_size,
+    page_size_smaller parent_node_size = Some child_node_size →
+    (* Then all children are sorted *)
+    (∀ (i : nat) child, children !! i = Some child →
+      child.(max_node_size) = child_node_size ∧
+      child.(base_address) = child_base_address parent_base_address child_node_size i)) ∧
+  (* If there can't be any children, there are no children *)
+  (page_size_smaller parent_node_size = None →
+    children = [])
+.
+
+(** Computes the maximum size this page node can allocate *)
+Definition page_node_can_allocate (node : page_storage_node) : option page_size :=
+  match node.(allocation_state) with
+  | PageTokenUnavailable => None
+  | PageTokenAvailable => Some node.(max_node_size)
+  | PageTokenPartiallyAvailable allocable => Some allocable
+  end.
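+
+(* Usage example (illustrative only): a node whose page token is fully available can
+   allocate a page of its own maximal size. *)
+Example page_node_can_allocate_available :
+  page_node_can_allocate (mk_page_node Size4KiB 0 PageTokenAvailable false) = Some Size4KiB.
+Proof. reflexivity. Qed.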
+ +(** The logical invariant on a page node *) +Definition page_storage_node_invariant + (node : page_storage_node) + (max_sz : option page_size) (maybe_page_token : option page) (children : list page_storage_node) := + + (* The children, if any, are well-formed *) + page_storage_node_children_wf node.(base_address) node.(max_node_size) children ∧ + (* the base address is suitably aligned *) + (page_size_align node.(max_node_size) | node.(base_address))%Z ∧ + + (* initialization of child nodes *) + (if node.(children_initialized) then length children = page_size_multiplier node.(max_node_size) else True) ∧ + + (* invariant depending on the allocation state: *) + if decide (node.(allocation_state) = PageTokenUnavailable) + then + (* No allocation is possible *) + maybe_page_token = None ∧ max_sz = None ∧ + + (* all children are unavailable *) + (* TODO do we need this *) + Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children + else if decide (node.(allocation_state) = PageTokenAvailable) + then + (* This node is completely available *) + ∃ token, + (* there is some token *) + maybe_page_token = Some token ∧ + (* the allocable size spans the whole page *) + max_sz = Some (node.(max_node_size)) ∧ + (* the token spans the whole node *) + token.(page_loc).2 = node.(base_address) ∧ + token.(page_sz) = node.(max_node_size) ∧ + + (* all children are unavailable *) + (* TODO: do we need this? *) + Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children + else + + (* This node is partially available with initialized children *) + maybe_page_token = None ∧ + (* Some size is available *) + ∃ allocable_sz, + node.(allocation_state) = PageTokenPartiallyAvailable allocable_sz ∧ + max_sz = Some allocable_sz ∧ + + (* children need to be initialized *) + node.(children_initialized) = true ∧ + + (* The maximum size stored in this node needs to be available in one of the children *) + ∃ i child, + children !! i = Some child ∧ + page_node_can_allocate child = Some allocable_sz +. + +Lemma page_storage_node_invariant_empty node_size base_address : + (page_size_align node_size | base_address) → + page_storage_node_invariant (mk_page_node node_size base_address PageTokenUnavailable false) None None []. +Proof. + intros. + split_and!; simpl; last split_and!; try done. + apply Nat2Z.divide. done. +Qed. + +(** States that the page is in the given memory range *) +(* TODO unify all the memory range stuff *) +Definition page_within_range (base_address : Z) (sz : page_size) (p : page) : Prop := + (base_address ≤ p.(page_loc).2 ∧ p.(page_loc).2 + page_size_in_bytes_Z p.(page_sz) < base_address + page_size_in_bytes_Z sz)%Z. diff --git a/verification/theories/page_table/dune b/verification/theories/page_table/dune new file mode 100644 index 0000000..51d328d --- /dev/null +++ b/verification/theories/page_table/dune @@ -0,0 +1,6 @@ +(coq.theory + (name ace.theories.page_table) + (package ace) + (flags -w -notation-overridden -w -redundant-canonical-projection) + (synopsis "Manual proofs for ACE Security Monitor page table") + (theories stdpp iris Ltac2 caesium lithium lrust RecordUpdate Equations refinedrust ace.theories.page_allocator)) diff --git a/verification/theories/page_table/page_table.v b/verification/theories/page_table/page_table.v new file mode 100644 index 0000000..ff78a03 --- /dev/null +++ b/verification/theories/page_table/page_table.v @@ -0,0 +1,455 @@ +From refinedrust Require Import typing. +From ace.theories.page_allocator Require Import page. 
+From stdpp Require Import bitvector.
+
+(* Based on Release riscv-isa-release-7b8ddc9-2024-05-07 *)
+
+(** Basic defs *)
+Definition bv_to_bit {n : N} (m : nat) (bv : bv n) : bool :=
+  bv_to_bits bv !!! m.
+
+Definition bit_to_bv (b : bool) : bv 1 :=
+  bool_to_bv 1 b.
+
+(** * Architectural ground-truth *)
+(* TODO: have file for translated sail specs and then link up with this *)
+
+(** ** Paging system *)
+(* Currently we only include the supported paging systems *)
+Inductive paging_system :=
+  | Sv57.
+
+Global Instance paging_system_eqdec : EqDecision paging_system.
+Proof. unfold EqDecision, Decision. intros. destruct x, y; decide equality. Qed.
+
+(** The virtual address length *)
+Definition paging_system_virtual_address_length (sys : paging_system) : N :=
+  match sys with
+  | Sv57 => 57
+  end.
+
+(** The physical address length *)
+Definition physical_address_length : N := 56.
+
+(** ** PTE Flags *)
+Record pte_flags : Type := mk_pte_flags {
+  (* 0 *)
+  PTEValid : bool;
+  (* 1 *)
+  PTERead : bool;
+  (* 2 *)
+  PTEWrite : bool;
+  (* 3 *)
+  PTEExecute : bool;
+  (* 4 *)
+  PTEUser : bool;
+  (* 5 *)
+  PTEGlobal : bool;
+  (* 6 *)
+  PTEAccessed : bool;
+  (* 7 *)
+  PTEDirty : bool;
+}.
+
+Definition pte_flags_invalid : pte_flags :=
+  mk_pte_flags false false false false false false false false.
+
+(* Decode PTE flags *)
+Definition pte_decode_flags (pte : bv 8) : pte_flags :=
+  mk_pte_flags
+    (bv_to_bit 0 pte)
+    (bv_to_bit 1 pte)
+    (bv_to_bit 2 pte)
+    (bv_to_bit 3 pte)
+    (bv_to_bit 4 pte)
+    (bv_to_bit 5 pte)
+    (bv_to_bit 6 pte)
+    (bv_to_bit 7 pte)
+.
+
+(* Encode PTE flags *)
+Definition pte_encode_flags (flags : pte_flags) : bv 8 :=
+  bv_concat 8 (bit_to_bv flags.(PTEDirty))
+  $ bv_concat 7 (bit_to_bv flags.(PTEAccessed))
+  $ bv_concat 6 (bit_to_bv flags.(PTEGlobal))
+  $ bv_concat 5 (bit_to_bv flags.(PTEUser))
+  $ bv_concat 4 (bit_to_bv flags.(PTEExecute))
+  $ bv_concat 3 (bit_to_bv flags.(PTEWrite))
+  $ bv_concat 2 (bit_to_bv flags.(PTERead))
+    (bit_to_bv flags.(PTEValid))
+.
+
+(* Check if PTE is a pointer to next level (non-leaf) *)
+Definition pte_is_ptr (flags : pte_flags) : bool :=
+  (negb flags.(PTERead)) && (negb flags.(PTEWrite)) && (negb flags.(PTEExecute))
+.
+
+(* Check if PTE is invalid *)
+Definition pte_is_invalid (flags : pte_flags) : bool :=
+  (negb flags.(PTEValid))
+  (* TODO: security monitor does not have to check for it *)
+  || (flags.(PTEWrite) && negb flags.(PTERead))
+.
+
+
+(** ** PTE *)
+
+(** Get the bits describing the flags *)
+Definition pte_get_flags_bits (pte : bv 64) : bv 8 :=
+  bv_extract 0 8 pte.
+
+(** Get the bits describing the rsw *)
+Definition pte_get_rsw_bits (pte : bv 64) : bv 2 :=
+  bv_extract 8 2 pte.
+
+(** Get the bits describing the ppn *)
+Definition pte_ppn_length : N := 44.
+Definition pte_get_ppn_bits (pte : bv 64) : bv pte_ppn_length :=
+  bv_extract 10 pte_ppn_length pte.
+
+(** Get the bits for the pbmt extension *)
+Definition pte_get_pbmt_bits (pte : bv 64) : bv 2 :=
+  bv_extract (pte_ppn_length + 10) 2 pte.
+
+(** Get the bits for the napot extension *)
+Definition pte_get_napot_bits (pte : bv 64) : bv 1 :=
+  bv_extract (pte_ppn_length + 10 + 2) 1 pte.
+
+
+
+(** Encode a physical address for a PPN entry *)
+Definition encode_physical_address_to_ppn (addr : Z) : bv pte_ppn_length :=
+  (* Ignore the 12 lowest bits. We have to wrap around for addresses which exceed physical size, as the higher bits are required to replicate the 56th bit. *)
+  bv_extract 12 pte_ppn_length (Z_to_bv physical_address_length addr) .
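+
+(* Illustration (not used below): a valid entry with R/W/X all cleared points to the
+   next level of the page table, whereas an entry with the read bit set is a leaf. *)
+Example pte_is_ptr_non_leaf :
+  pte_is_ptr (mk_pte_flags true false false false false false false false) = true.
+Proof. reflexivity. Qed.
+Example pte_is_ptr_leaf :
+  pte_is_ptr (mk_pte_flags true true false false false false false false) = false.
+Proof. reflexivity. Qed.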
+
+(** A valid physical address should fit into 64 bits. The highest 8 bits should replicate the 56th bit. *)
+Definition is_valid_physical_address (addr : Z) :=
+  ∃ bv_addr,
+    Z_to_bv_checked 64 addr = Some bv_addr ∧
+    Forall (λ x, bv_to_bit (N.to_nat physical_address_length) bv_addr = x) (drop (N.to_nat physical_address_length) (bv_to_bits bv_addr)).
+
+(** Encode a page table entry *)
+Definition encode_pte
+  (phys_addr : Z)
+  (flags : pte_flags) : bv 64 :=
+  (* The leading bits up to the PPN are zero, including pbmt and napot bits *)
+  bv_concat 64 (encode_physical_address_to_ppn phys_addr) (* PPN *)
+  $ bv_concat 10 (bv_0 2) (* RSW *)
+  $ pte_encode_flags flags (* flags *)
+.
+
+(** Translate an address according to the page table located at [pt_addr] in [σ]. *)
+Definition translate_address (pt_addr : Z) (σ : state) (addr : Z) : option Z :=
+  (* TODO *)
+  None
+.
+
+
+(** * Logical representation *)
+(** Configuration of the page table entry *)
+Record page_table_config : Type := mk_ptc {
+  pt_accessible_to_user : bool;
+  pt_was_accessed : bool;
+  pt_is_global_mapping : bool;
+  pt_is_dirty : bool;
+}.
+Global Instance page_table_config_inh : Inhabited page_table_config := populate (mk_ptc false false false false).
+
+
+(** Permissions for this page table entry *)
+Record page_table_permission : Type := mk_ptp {
+  ptp_can_read : bool;
+  ptp_can_write : bool;
+  ptp_can_execute : bool;
+}.
+Global Instance page_table_permission_inh : Inhabited page_table_permission := populate (mk_ptp false false false).
+
+Definition pt_permission_pointer : page_table_permission :=
+  mk_ptp false false false.
+
+(** Encode page table flags *)
+Definition to_pte_flags (valid : bool) (ptc : page_table_config) (ptp : page_table_permission) : pte_flags := {|
+  PTEValid := valid;
+  PTERead := ptp.(ptp_can_read);
+  PTEWrite := ptp.(ptp_can_write);
+  PTEExecute := ptp.(ptp_can_execute);
+  PTEUser := ptc.(pt_accessible_to_user);
+  PTEGlobal := ptc.(pt_is_global_mapping);
+  PTEAccessed := ptc.(pt_was_accessed);
+  PTEDirty := ptc.(pt_is_dirty);
+|}.
+
+(** The valid PTE flag bits. *)
+Inductive pte_flags_bits :=
+  | PTValid
+  | PTRead
+  | PTWrite
+  | PTExecute
+  | PTUser
+  | PTGlobal
+  | PTAccessed
+  | PTDirty
+.
+
+(*Definition pte_flags_bits_mask *)
+
+(** Physical page table entries *)
+Inductive page_table_entry :=
+  | UnmappedPTE
+  | NextPTE (l : loc)
+  | DataPTE (l : loc)
+.
+
+Record shared_page : Type := {
+  shared_page_hv_address : Z;
+  shared_page_sz : page_size;
+}.
+
+(** Level of page tables *)
+Inductive page_table_level :=
+  | PTLevel5
+  | PTLevel4
+  | PTLevel3
+  | PTLevel2
+  | PTLevel1.
+
+Global Instance page_table_level_eqdec : EqDecision page_table_level.
+Proof. solve_decision. Defined.
+Global Instance page_table_level_inhabited : Inhabited page_table_level.
+Proof. exact (populate PTLevel1). Qed.
+
+Definition page_table_level_lower (pt : page_table_level) : option page_table_level :=
+  match pt with
+  | PTLevel5 => Some PTLevel4
+  | PTLevel4 => Some PTLevel3
+  | PTLevel3 => Some PTLevel2
+  | PTLevel2 => Some PTLevel1
+  | PTLevel1 => None
+  end.
+
+(** The highest level for this paging system *)
+Definition paging_system_highest_level (system : paging_system) : page_table_level :=
+  match system with
+  | Sv57 => PTLevel5
+  end.
+
+Definition number_of_page_table_entries (system : paging_system) (level : page_table_level) : nat :=
+  if decide (level = paging_system_highest_level system) then 2048%nat else 512%nat.
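+
+(* Sanity check (illustrative only): the highest-level page table has 2048 entries of
+   8 bytes each, i.e. 16 KiB, matching the 16 KiB page required for level-5 tables by
+   [is_byte_level_representation] below; all other levels have 512 entries (4 KiB). *)
+Example number_of_page_table_entries_root :
+  number_of_page_table_entries Sv57 PTLevel5 = 2048%nat.
+Proof. reflexivity. Qed.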
+
+(** Logical page table entries defined mutually inductively with page table trees *)
+Inductive logical_page_table_entry : Type :=
+  | PointerToNextPageTable
+    (next : page_table_tree)
+    (conf : page_table_config)
+
+  | PageWithConfidentialVmData
+    (p : page)
+    (conf : page_table_config)
+    (perm : page_table_permission)
+
+  | PageSharedWithHypervisor
+    (sp : shared_page)
+    (conf : page_table_config)
+    (perm : page_table_permission)
+
+  | NotValid
+
+with page_table_tree :=
+  | PageTableTree
+    (system : paging_system)
+    (serialized_addr : Z)
+    (entries : list logical_page_table_entry)
+    (level : page_table_level)
+.
+
+Definition pt_get_system (pt : page_table_tree) : paging_system :=
+  match pt with
+  | PageTableTree system _ _ _ => system
+  end.
+
+Definition pt_get_level (pt : page_table_tree) : page_table_level :=
+  match pt with
+  | PageTableTree _ _ _ level => level
+  end.
+
+Definition pt_get_entries (pt : page_table_tree) : list logical_page_table_entry :=
+  match pt with
+  | PageTableTree _ _ entries _ => entries
+  end.
+
+Definition pt_get_serialized_addr (pt : page_table_tree) : Z :=
+  match pt with
+  | PageTableTree _ loc _ _ => loc
+  end.
+
+Definition pt_number_of_entries (pt : page_table_tree) : nat :=
+  match pt with
+  | PageTableTree system _ _ level => number_of_page_table_entries system level
+  end.
+
+(** Asserts that the level of a logical page table/page table entry is given by [l], and that it is properly decreasing for children. *)
+Fixpoint page_table_level_is (l : option page_table_level) (p : page_table_tree) {struct p} :=
+  match p with
+  | PageTableTree sys addr entries level =>
+    Some level = l ∧
+    Forall_cb (logical_page_table_entry_level_is (page_table_level_lower level)) entries
+  end
+with logical_page_table_entry_level_is (l : option page_table_level) (pt : logical_page_table_entry) {struct pt} :=
+  match pt with
+  | PointerToNextPageTable next conf =>
+    page_table_level_is l next
+  | _ =>
+    True
+  end.
+
+(** Predicate specifying that this entry/tree has a particular paging system *)
+Fixpoint page_table_tree_has_system (system : paging_system) (pt : page_table_tree) :=
+  match pt with
+  | PageTableTree system' _ entries _ =>
+    system = system' ∧
+    Forall_cb (logical_page_table_entry_has_system system) entries
+  end
+with logical_page_table_entry_has_system (system : paging_system) (pte : logical_page_table_entry) :=
+  match pte with
+  | PointerToNextPageTable pt _ =>
+    page_table_tree_has_system system pt
+  | _ =>
+    True
+  end.
+
+
+(** Well-formedness of page table trees *)
+(* TODO: maybe make this intrinsic using dependent type *)
+Definition page_table_wf (pt : page_table_tree) :=
+  (* number of page table entries is determined by the level *)
+  number_of_page_table_entries (pt_get_system pt) (pt_get_level pt) = length (pt_get_entries pt) ∧
+  (* ensure that levels are decreasing *)
+  page_table_level_is (Some $ pt_get_level pt) pt ∧
+  (* ensure that the system is the same across the whole page table *)
+  page_table_tree_has_system (pt_get_system pt) pt
+.
+(* TODO: ensure everything is in confidential memory *)
+
+Definition page_table_is_first_level (pt : page_table_tree) :=
+  pt_get_level pt = paging_system_highest_level (pt_get_system pt).
+
+
+(** Empty page table tree *)
+Definition make_empty_page_tree (system : paging_system) (level : page_table_level) (loc : Z) :=
+  PageTableTree system loc (replicate (number_of_page_table_entries system level) NotValid) level.
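+
+(* Usage example (illustrative only): an empty non-root page table has 512 entries,
+   all of which are [NotValid]. *)
+Example make_empty_page_tree_entries :
+  length (pt_get_entries (make_empty_page_tree Sv57 PTLevel1 0)) = 512%nat.
+Proof. reflexivity. Qed.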
+ +Lemma make_empty_page_tree_wf system level loc : + page_table_wf (make_empty_page_tree system level loc). +Proof. + split_and!; simpl. + - rewrite replicate_length//. + - split; first done. apply Forall_Forall_cb. + apply Forall_replicate. done. + - split; first done. apply Forall_Forall_cb. + apply Forall_replicate. done. +Qed. + +(** * Encoding of logical page table trees into the physical byte-level representation *) +Definition encode_logical_page_table_entry_bv (pte : logical_page_table_entry) : bv 64 := + match pte with + | PointerToNextPageTable pt ptc => + (* This is not a leaf *) + encode_pte (pt_get_serialized_addr pt) (to_pte_flags true ptc pt_permission_pointer) + | PageWithConfidentialVmData pg ptc ptp => + encode_pte pg.(page_loc).2 (to_pte_flags true ptc ptp) + | PageSharedWithHypervisor sp ptc ptp => + encode_pte sp.(shared_page_hv_address) (to_pte_flags true ptc ptp) + | NotValid => + encode_pte 0 pte_flags_invalid + end +. +Definition encode_logical_page_table_entry (pte : logical_page_table_entry) : Z := + bv_unsigned $ encode_logical_page_table_entry_bv pte. +Definition encode_page_table_entries (entries : list logical_page_table_entry) : list Z := + encode_logical_page_table_entry <$> entries +. + +(** Relation that states that [pt_byte] is a valid byte-level representation of [pt_logical]'s entries. *) +Definition is_byte_level_representation (pt_logical : page_table_tree) (pt_byte : page) := + (* The physical address matches up *) + pt_byte.(page_loc).2 = pt_get_serialized_addr pt_logical ∧ + (* The logical representation is well-formed *) + page_table_wf pt_logical ∧ + (* We have a 16KiB page for Level 5, and 4KiB pages otherwise *) + (if pt_get_level pt_logical is PTLevel5 then pt_byte.(page_sz) = Size16KiB else pt_byte.(page_sz) = Size4KiB) ∧ + (* The encoding of the entries matches the physical content of the pages *) + pt_byte.(page_val) = encode_page_table_entries (pt_get_entries pt_logical) +. + +(** Operations modifying the page table *) +Definition pt_set_entry (pt : page_table_tree) (index : nat) (entry : logical_page_table_entry) : page_table_tree := + match pt with + | PageTableTree system serialized_addr entries level => + PageTableTree system serialized_addr (<[index := entry]> entries) level + end. +Lemma pt_set_entry_system pt i en : + pt_get_system (pt_set_entry pt i en) = pt_get_system pt. +Proof. destruct pt; done. Qed. +Lemma pt_set_entry_addr pt i en : + pt_get_serialized_addr (pt_set_entry pt i en) = pt_get_serialized_addr pt. +Proof. destruct pt; done. Qed. +Lemma pt_set_entry_level pt i en : + pt_get_level (pt_set_entry pt i en) = pt_get_level pt. +Proof. destruct pt; done. Qed. +Lemma pt_set_entry_entries pt i en : + pt_get_entries (pt_set_entry pt i en) = <[i := en]> (pt_get_entries pt). +Proof. destruct pt; done. Qed. + +(** Preservation of well-formedness when setting an entry *) +Lemma pt_set_entry_wf pt pt' i en : + pt_set_entry pt i en = pt' → + logical_page_table_entry_level_is (page_table_level_lower $ pt_get_level pt) en → + logical_page_table_entry_has_system (pt_get_system pt) en → + page_table_wf pt → + page_table_wf pt'. +Proof. + intros <- Hlevel Hsystem. + intros (Hlen & Hlv & Hsys). + split_and!. + - rewrite pt_set_entry_system pt_set_entry_level. + rewrite pt_set_entry_entries insert_length//. + - rewrite pt_set_entry_level. + destruct pt; simpl in *. split; first done. + apply Forall_Forall_cb. apply Forall_insert. + { destruct Hlv as [_ Ha]. apply Forall_Forall_cb. done. } + { done. } + - rewrite pt_set_entry_system. 
+ destruct pt; simpl in *. + split; first done. + apply Forall_Forall_cb. + apply Forall_insert. + { destruct Hsys as [_ Hsys]. apply Forall_Forall_cb; done. } + { done. } +Qed. + +(** ** Using a page table *) +(** Translate an address according to the logical page table representation *) +Definition page_table_translate_address (pt : page_table_tree) (addr : Z) : option Z := + None. + +(** State that the page table is represented at a particular root address in memory *) +Definition pt_represented_at (σ : state) (pt : page_table_tree) (pt_addr : Z) := + (* TODO *) + False. + +(** Address translation should be consistent between the logical and physical version *) +Lemma page_table_translate_address_consistent_1 (pt : page_table_tree) addr pt_addr σ translated_addr : + pt_represented_at σ pt pt_addr → + page_table_translate_address pt addr = Some translated_addr → + translate_address pt_addr σ addr = Some translated_addr. +Proof. +Abort. + +Lemma page_table_translate_address_consistent_2 pt addr pt_addr σ translated_addr : + pt_represented_at σ pt pt_addr → + translate_address pt_addr σ addr = Some translated_addr → + page_table_translate_address pt addr = Some translated_addr +. +Proof. +Abort.