From 18710b3dfa0189846cb53a828b4fca1c86a837ec Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Mon, 26 Feb 2024 02:52:36 +1100 Subject: [PATCH] Temp Signed-off-by: Klim Tsoutsman --- applications/loadc/Cargo.toml | 1 + applications/loadc/src/lib.rs | 440 ++++++++++------------- applications/test_executable/src/main.rs | 2 + kernel/crate_metadata/src/lib.rs | 7 + kernel/page_allocator/src/lib.rs | 34 +- 5 files changed, 217 insertions(+), 267 deletions(-) diff --git a/applications/loadc/Cargo.toml b/applications/loadc/Cargo.toml index 48ca745cab..0c8373945d 100644 --- a/applications/loadc/Cargo.toml +++ b/applications/loadc/Cargo.toml @@ -3,6 +3,7 @@ name = "loadc" version = "0.1.0" description = "A convenience application for loading C language ELF executables." authors = ["Kevin Boos "] +edition = "2021" [dependencies] getopts = "0.2.21" diff --git a/applications/loadc/src/lib.rs b/applications/loadc/src/lib.rs index 62ea7881fb..cf613fa978 100644 --- a/applications/loadc/src/lib.rs +++ b/applications/loadc/src/lib.rs @@ -1,38 +1,34 @@ //! An application that loads C language ELF executables atop Theseus. //! -//! This will be integrated into the Theseus kernel in the future, +//! This will be integrated into the Theseus kernel in the future, //! likely as a separate crate that integrates well with the `mod_mgmt` crate. #![no_std] extern crate alloc; -#[macro_use] extern crate log; -#[macro_use] extern crate app_io; -extern crate getopts; -extern crate fs_node; -extern crate path; -extern crate memory; -extern crate rustc_demangle; -extern crate mod_mgmt; -extern crate task; -extern crate xmas_elf; -extern crate libc; // for basic C types/typedefs used in libc +use alloc::{ + collections::BTreeSet, + format, + string::{String, ToString}, + sync::Arc, + vec::Vec, +}; use core::{ - cmp::{min, max}, + cmp::{max, min}, ops::Range, }; -use alloc::{collections::BTreeSet, string::{String, ToString}, sync::Arc, vec::Vec}; + +use app_io::println; use getopts::{Matches, Options}; -use memory::{Page, MappedPages, VirtualAddress, PteFlagsArch, PteFlags}; -use mod_mgmt::{CrateNamespace, StrongDependency, find_symbol_table, RelocationEntry, write_relocation}; +use log::{debug, error, warn}; +use memory::{MappedPages, Page, PteFlags, PteFlagsArch, VirtualAddress}; +use mod_mgmt::{ + find_symbol_table, write_relocation, CrateNamespace, RelocationEntry, StrongDependency, +}; use path::Path; use rustc_demangle::demangle; -use xmas_elf::{ - ElfFile, - program::SegmentData, - sections::ShType, -}; +use xmas_elf::{program::SegmentData, sections::ShType, ElfFile, P64}; pub fn main(args: Vec) -> isize { let mut opts = Options::new(); @@ -61,18 +57,22 @@ pub fn main(args: Vec) -> isize { } } - fn rmain(matches: Matches) -> Result { - let (curr_wd, namespace, mmi) = task::with_current_task(|curr_task| + let (curr_wd, namespace, mmi) = task::with_current_task(|curr_task| { ( curr_task.get_env().lock().working_dir.clone(), curr_task.get_namespace().clone(), curr_task.mmi.clone(), ) - ).map_err(|_| String::from("failed to get current task"))?; - - let path = matches.free.first().ok_or_else(|| "Missing path to ELF executable".to_string())?; - let file_ref = Path::new(path).get_file(&curr_wd) + }) + .map_err(|_| String::from("failed to get current task"))?; + + let path = matches + .free + .first() + .ok_or_else(|| "Missing path to ELF executable".to_string())?; + let file_ref = Path::new(path) + .get_file(&curr_wd) .ok_or_else(|| format!("Failed to access file at {path:?}"))?; let file = file_ref.lock(); @@ -83,13 
+83,16 @@ fn rmain(matches: Matches) -> Result { let (mut segments, entry_point, elf_file) = parse_and_load_elf_executable(byte_slice)?; debug!("Parsed ELF executable, moving on to overwriting relocations."); - // Now, overwrite (recalculate) the relocations that refer to symbols that already exist in Theseus, - // most important of which are static data sections, - // as it is logically incorrect to have duplicates of data that are supposed to be global system-wide singletons. - // We should throw a warning here if there are no relocations in the file, as it was probably built/linked with the wrong arguments. - overwrite_relocations(&namespace, &mut segments, &elf_file, &mmi, false)?; + // Now, overwrite (recalculate) the relocations that refer to symbols that + // already exist in Theseus, most important of which are static data + // sections, as it is logically incorrect to have duplicates of data that + // are supposed to be global system-wide singletons. We should throw a + // warning here if there are no relocations in the file, as it was probably + // built/linked with the wrong arguments. overwrite_relocations(&namespace, + // &mut segments, &elf_file, &mmi, false)?; - // Remap each segment's mapped pages using the correct flags; they were previously mapped as always writable. + // Remap each segment's mapped pages using the correct flags; they were + // previously mapped as always writable. { let page_table = &mut mmi.lock().page_table; for segment in segments.iter_mut() { @@ -99,9 +102,18 @@ fn rmain(matches: Matches) -> Result { } } - segments.iter().enumerate().for_each(|(i, seg)| debug!("Segment {} needed {} relocations to be rewritten.", i, seg.sections_i_depend_on.len()) ); + segments.iter().enumerate().for_each(|(i, seg)| { + debug!( + "Segment {} needed {} relocations to be rewritten.", + i, + seg.sections_i_depend_on.len() + ) + }); - let _executable = LoadedExecutable { segments, entry_point }; // must persist through the entire executable's runtime. + let _executable = LoadedExecutable { + segments, + entry_point, + }; // must persist through the entire executable's runtime. debug!("Jumping to entry point {:#X}", entry_point); @@ -112,63 +124,69 @@ fn rmain(matches: Matches) -> Result { let start_fn: StartFunction = unsafe { core::mem::transmute(entry_point.value()) }; let c_retval = start_fn(&dummy_args, &dummy_env); - debug!("C _start entry point returned value {}({:#X})", c_retval, c_retval); + debug!( + "C _start entry point returned value {}({:#X})", + c_retval, c_retval + ); Ok(c_retval) } /// Corresponds to C function: `int foo()` use libc::c_int; -use xmas_elf::symbol_table::Entry; +use xmas_elf::{dynamic::Tag, sections::Rela, symbol_table::Entry}; type StartFunction = fn(args: &[&str], env: &[&str]) -> c_int; - #[allow(unused)] struct LoadedExecutable { segments: Vec, entry_point: VirtualAddress, } - -/// Represents an ELF program segment that has been loaded into memory. +/// Represents an ELF program segment that has been loaded into memory. #[derive(Debug)] #[allow(dead_code)] pub struct LoadedSegment { /// The memory region allocated to hold this program segment. mp: MappedPages, - /// The specific range of virtual addresses occupied by this + /// The specific range of virtual addresses occupied by this /// (may be a subset) bounds: Range, /// The proper flags for this segment specified by the ELF file. 
flags: PteFlagsArch, - /// The indices of the sections in the ELF file + /// The indices of the sections in the ELF file /// that were grouped ("mapped") into this segment by the linker. section_ndxs: BTreeSet, - /// The list of sections in existing Theseus crates that this segment's sections depends on, - /// i.e., the required dependencies that must exist as long as this segment. + /// The list of sections in existing Theseus crates that this segment's + /// sections depends on, i.e., the required dependencies that must exist + /// as long as this segment. sections_i_depend_on: Vec, } -/// Parses an elf executable file from the given slice of bytes and load it into memory. +/// Parses an elf executable file from the given slice of bytes and load it into +/// memory. /// /// # Important note about memory mappings /// This function will allocate new memory regions to store each program segment /// and copy each segment's data into them. -/// When this function returns, those segments will be mapped as writable in order to allow them -/// to be modified as needed. -/// Before running this executable, each segment's `MappedPages` should be remapped -/// to the proper `flags` specified in its `LoadedSegment.flags` field. +/// When this function returns, those segments will be mapped as writable in +/// order to allow them to be modified as needed. +/// Before running this executable, each segment's `MappedPages` should be +/// remapped to the proper `flags` specified in its `LoadedSegment.flags` field. /// /// # Return /// Returns a tuple of: -/// 1. A list of program segments mapped into memory. -/// 2. The virtual address of the executable's entry point, e.g., the `_start` function. -/// This is the function that we should call to start running the executable. -/// 3. The `Offset` by which all virtual addresses in the loaded executable should be shifted by. -/// This is the difference between where the program is *actually* loaded in memory -/// and where the program *expected* to be loaded into memory. -/// 4. A reference to the parsed `ElfFile`, whose lifetime is tied to the given `file_contents` parameter. +/// 1. A list of program segments mapped into memory. +/// 2. The virtual address of the executable's entry point, e.g., the `_start` +/// function. This is the function that we should call to start running the +/// executable. +/// 3. The `Offset` by which all virtual addresses in the loaded executable +/// should be shifted by. This is the difference between where the program is +/// *actually* loaded in memory and where the program *expected* to be loaded +/// into memory. +/// 4. A reference to the parsed `ElfFile`, whose lifetime is tied to the given +/// `file_contents` parameter. 
fn parse_and_load_elf_executable( file_contents: &[u8], ) -> Result<(Vec, VirtualAddress, ElfFile), String> { @@ -176,20 +194,26 @@ fn parse_and_load_elf_executable( let elf_file = ElfFile::new(file_contents).map_err(String::from)?; - // check that elf_file is an executable type + // check that elf_file is an executable type let typ = elf_file.header.pt2.type_().as_type(); if typ != xmas_elf::header::Type::SharedObject { - error!("parse_elf_executable(): ELF file has wrong type {:?}, must be an Executable Elf File!", typ); + error!( + "parse_elf_executable(): ELF file has wrong type {:?}, must be an Executable Elf File!", + typ + ); return Err("not an executable".into()); } let (mut start_vaddr, mut end_vaddr) = (usize::MAX, usize::MIN); let mut num_segments = 0; for prog_hdr in elf_file.program_iter() { - if prog_hdr.get_type() == Ok(xmas_elf::program::Type::Load) || prog_hdr.get_type() == Ok(xmas_elf::program::Type::Phdr) { + if prog_hdr.get_type() == Ok(xmas_elf::program::Type::Load) { num_segments += 1; start_vaddr = min(start_vaddr, prog_hdr.virtual_addr() as usize); - end_vaddr = max(end_vaddr, prog_hdr.virtual_addr() as usize + prog_hdr.mem_size() as usize); + end_vaddr = max( + end_vaddr, + prog_hdr.virtual_addr() as usize + prog_hdr.mem_size() as usize, + ); } } @@ -197,8 +221,8 @@ fn parse_and_load_elf_executable( // Allocate enough virtually-contiguous space for all the segments together. let total_size_in_bytes = end_vaddr - start_vaddr; - let mut all_pages = memory::allocate_pages_by_bytes(total_size_in_bytes - ).ok_or_else(|| format!("Failed to allocate {total_size_in_bytes}"))?; + let mut all_pages = memory::allocate_pages_by_bytes(total_size_in_bytes) + .ok_or_else(|| format!("Failed to allocate {total_size_in_bytes}"))?; let file_start = all_pages.start_address(); for section in elf_file.section_iter() { @@ -207,21 +231,26 @@ fn parse_and_load_elf_executable( } log::info!("done"); - // Iterate through each segment again and map them into pages we just allocated above, - // copying their segment data to the proper location. + // Iterate through each segment again and map them into pages we just allocated + // above, copying their segment data to the proper location. for (segment_ndx, prog_hdr) in elf_file.program_iter().enumerate() { log::info!("looking at segment {segment_ndx} {prog_hdr:#?}"); - if prog_hdr.get_type() == Ok(xmas_elf::program::Type::Load) || prog_hdr.get_type() == Ok(xmas_elf::program::Type::Phdr) { + // if prog_hdr.get_type() == Ok(xmas_elf::program::Type::Load) || + // prog_hdr.get_type() == Ok(xmas_elf::program::Type::Phdr) { + if prog_hdr.get_type() != Ok(xmas_elf::program::Type::Load) { continue; } - // A segment (program header) has two sizes: - // 1) memory size: the size in memory that the segment, when loaded, will actually consume. - // This is how much virtual memory space we have to allocate for it. - // 2) file size: the size of the segment's actual data from the ELF file itself. - // This is how much data we will actually copy from the file's segment into our allocated memory. - // The difference is primarily due to .bss sections, in which the file size will be less than the memory size. - // If memory size > file size, the difference should be filled with zeros. + // A segment (program header) has two sizes: + // 1) memory size: the size in memory that the segment, when loaded, will + // actually consume. This is how much virtual memory space we have to + // allocate for it. 
+ // 2) file size: the size of the segment's actual data from the ELF file itself. + // This is how much data we will actually copy from the file's segment into + // our allocated memory. + // The difference is primarily due to .bss sections, in which the file size will + // be less than the memory size. If memory size > file size, the + // difference should be filled with zeros. let memory_size_in_bytes = prog_hdr.mem_size() as usize; let file_size_in_bytes = prog_hdr.file_size() as usize; if memory_size_in_bytes == 0 { @@ -238,32 +267,49 @@ fn parse_and_load_elf_executable( debug!("Splitting {:?} after end page {:?}", all_pages, end_page); - let (this_ap, remaining_pages) = all_pages.split(end_page + 1).map_err(|_ap| + let (this_ap, remaining_pages) = all_pages.split(end_page + 1).map_err(|_ap| { format!("Failed to split allocated pages {_ap:?} at page {start_vaddr:#X}") - )?; + })?; all_pages = remaining_pages; - debug!("Successfully split pages into {:?} and {:?}", this_ap, all_pages); - debug!("Adjusted segment vaddr: {:#X}, size: {:#X}, {:?}", start_vaddr, memory_size_in_bytes, this_ap.start_address()); + debug!( + "Successfully split pages into {:?} and {:?}", + this_ap, all_pages + ); + debug!( + "Adjusted segment vaddr: {:#X}, size: {:#X}, {:?}", + start_vaddr, + memory_size_in_bytes, + this_ap.start_address() + ); let initial_flags = convert_to_pte_flags(prog_hdr.flags()); let mmi = task::with_current_task(|t| t.mmi.clone()).unwrap(); - // Must initially map the memory as writable so we can copy the segment data to it later. - let mut mp = mmi.lock().page_table + // Must initially map the memory as writable so we can copy the segment data to + // it later. + let mut mp = mmi + .lock() + .page_table .map_allocated_pages(this_ap, initial_flags.writable(true)) .map_err(String::from)?; - // Copy data from this section into the correct offset into our newly-mapped pages - let offset_into_mp = mp.offset_of_address(start_vaddr).ok_or_else(|| + // Copy data from this section into the correct offset into our newly-mapped + // pages + let offset_into_mp = mp.offset_of_address(start_vaddr).ok_or_else(|| { format!("BUG: destination address {start_vaddr:#X} wasn't within segment's {mp:?}") - )?; + })?; match prog_hdr.get_data(&elf_file).map_err(String::from)? { SegmentData::Undefined(segment_data) => { - // debug!("Segment had undefined data of {} ({:#X}) bytes, file size {} ({:#X})", - // segment_data.len(), segment_data.len(), file_size_in_bytes, file_size_in_bytes); - let dest_slice: &mut [u8] = mp.as_slice_mut(offset_into_mp, memory_size_in_bytes).map_err(String::from)?; - dest_slice[..file_size_in_bytes].copy_from_slice(&segment_data[..file_size_in_bytes]); + // debug!("Segment had undefined data of {} ({:#X}) bytes, file size {} + // ({:#X})", segment_data.len(), segment_data.len(), + // file_size_in_bytes, file_size_in_bytes); + let dest_slice: &mut [u8] = mp + .as_slice_mut(offset_into_mp, memory_size_in_bytes) + .map_err(String::from)?; + dest_slice[..file_size_in_bytes] + .copy_from_slice(&segment_data[..file_size_in_bytes]); if memory_size_in_bytes > file_size_in_bytes { - // debug!(" Zero-filling extra bytes for segment from range [{}:{}).", file_size_in_bytes, dest_slice.len()); + // debug!(" Zero-filling extra bytes for segment from range [{}:{}).", + // file_size_in_bytes, dest_slice.len()); dest_slice[file_size_in_bytes..].fill(0); } } @@ -272,7 +318,7 @@ fn parse_and_load_elf_executable( } }; - let segment_bounds = start_vaddr .. 
(start_vaddr + memory_size_in_bytes); + let segment_bounds = start_vaddr..(start_vaddr + memory_size_in_bytes); // Populate the set of sections that comprise this segment. let mut section_ndxs = BTreeSet::new(); @@ -282,186 +328,80 @@ fn parse_and_load_elf_executable( } } - debug!("Loaded segment {} at {:X?} contains sections: {:?}", segment_ndx, segment_bounds, section_ndxs); + debug!( + "Loaded segment {} at {:X?} contains sections: {:?}", + segment_ndx, segment_bounds, section_ndxs + ); mapped_segments.push(LoadedSegment { mp, bounds: segment_bounds, flags: initial_flags.into(), section_ndxs, - sections_i_depend_on: Vec::new(), // this is populated later in `overwrite_relocations()` + sections_i_depend_on: Vec::new(), /* this is populated later in + * `overwrite_relocations()` */ }); } - let entry_offset = VirtualAddress::new(elf_file.header.pt2.entry_point() as usize).ok_or("invalid entry point address")?; - let entry_vaddr = entry_offset + all_pages.start_address(); - - debug!("ELF had entry point {:#X}, adjusted to {:#X}", entry_offset, entry_vaddr); - - Ok((mapped_segments, entry_vaddr, elf_file)) -} - - + let mut relocation_table_offset = None; + let mut relocation_table_size = None; + let mut relocation_entry_size = None; -/// This function uses the relocation sections in the given `ElfFile` to -/// rewrite relocations that depend on source sections already existing and currently loaded in Theseus. -/// -/// This is necessary to ensure that the newly-loaded ELF executable depends on and references -/// the real singleton instances of each data sections (aka `OBJECT`s in ELF terminology) -/// rather than using the duplicate instance of those data sections in the executable itself. -fn overwrite_relocations( - namespace: &Arc, - segments: &mut [LoadedSegment], - elf_file: &ElfFile, - mmi: &memory::MmiRef, - verbose_log: bool -) -> Result<(), String> { - let symtab = find_symbol_table(elf_file)?; - - // Fix up the sections that were just loaded, using proper relocation info. - // Iterate over every non-zero relocation section in the file - for sec in elf_file.section_iter().filter(|sec| sec.get_type() == Ok(ShType::Rela) && sec.size() != 0) { - use xmas_elf::sections::SectionData::Rela64; - if verbose_log { - trace!("Found Rela section name: {:?}, type: {:?}, target_sec_index: {:?}", - sec.get_name(elf_file), sec.get_type(), sec.info() - ); - } - - let rela_sec_name = sec.get_name(elf_file).unwrap(); - // Skip debug special sections for now, those can be processed later. 
- if rela_sec_name.starts_with(".rela.debug") { - continue; - } - // Skip .eh_frame relocations, since they are all local to the .text section - // and cannot depend on external symbols directly - if rela_sec_name == ".rela.eh_frame" { - continue; + for (segment_ndx, prog_hdr) in elf_file.program_iter().enumerate() { + log::info!("looking at segment {segment_ndx} {prog_hdr:#?}"); + if let Ok(SegmentData::Dynamic64(list)) = prog_hdr.get_data(&elf_file) { + for entry in list { + if let Ok(tag) = entry.get_tag() { + match tag { + Tag::Rela => { + relocation_table_offset = Some(entry.get_ptr().unwrap()); + } + Tag::RelaSize => relocation_table_size = Some(entry.get_val().unwrap()), + Tag::RelaEnt => relocation_entry_size = Some(entry.get_val().unwrap()), + _ => log::warn!("unhandled dyn tag: {tag:?}"), + } + } else { + log::error!("Error decoding tag"); + } + } } + } - let rela_array = match sec.get_data(elf_file) { - Ok(Rela64(rela_arr)) => rela_arr, - _ => { - let err = format!("Found Rela section that wasn't able to be parsed as Rela64: {sec:?}"); - error!("{}", err); - return Err(err); - } + // The offset of the relocation table. + let relocation_table_offset = relocation_table_offset.unwrap() as usize; + let relocation_table_size = relocation_table_size.unwrap(); + let relocation_entry_size = relocation_entry_size.unwrap(); + + let ptr = (file_start + relocation_table_offset).value() as *const Rela; + let len = (relocation_table_size / relocation_entry_size) as usize; + assert_eq!( + relocation_table_size % relocation_entry_size, + 0, + "relocation table size wasn't a multiple of entry size" + ); + let rela_table = unsafe { core::slice::from_raw_parts(ptr, len) }; + + for rela in rela_table { + let x = rela.get_symbol_table_index(); + log::info!("X: {x:0x?}"); + let entry = RelocationEntry::from_elf_relocation(rela); + let slice = unsafe { + core::slice::from_raw_parts_mut(file_start.value() as *mut u8, total_size_in_bytes) }; + write_relocation(entry, slice, 0, file_start, false).unwrap(); + } - // The target section (segment) is where we write the relocation data to. - // The source section is where we get the data from. - // There is one target section per rela section (`rela_array`), and one source section per `rela_entry` in each `rela_array`. - // The "info" field in the Rela section specifies which section is the target of the relocation. - - // Get the target section (that we already loaded) for this rela_array Rela section. - let target_sec_shndx = sec.info() as usize; - let target_segment = segments.iter_mut() - .find(|seg| seg.section_ndxs.contains(&target_sec_shndx)) - .ok_or_else(|| { - let err = format!("ELF file error: couldn't find loaded segment that contained section for Rela section {:?}!", sec.get_name(elf_file)); - error!("{}", err); - err - })?; - - let mut target_segment_dependencies: Vec = Vec::new(); - let target_segment_start_addr = target_segment.bounds.start; - let target_segment_slice: &mut [u8] = target_segment.mp.as_slice_mut( - 0, - target_segment.bounds.end.value() - target_segment.bounds.start.value(), - )?; - - // iterate through each relocation entry in the relocation array for the target_sec - for rela_entry in rela_array { - use xmas_elf::symbol_table::{Type, Entry}; - let source_sec_entry = &symtab[rela_entry.get_symbol_table_index() as usize]; - - // Ignore relocations that refer/point to irrelevant things: sections, files, notypes, or nothing. 
- match source_sec_entry.get_type() { - Err(_) | Ok(Type::NoType) | Ok(Type::Section) | Ok(Type::File) => continue, - _ => { } // keep going to process the relocation - } - if verbose_log { - trace!(" Rela64 entry has offset: {:#X}, addend: {:#X}, symtab_index: {}, type: {:#X}", - rela_entry.get_offset(), rela_entry.get_addend(), rela_entry.get_symbol_table_index(), rela_entry.get_type()); - } - - let source_sec_shndx = source_sec_entry.shndx() as usize; - let source_sec_name = match source_sec_entry.get_name(elf_file) { - Ok(name) => name, - _ => continue, - }; - - if verbose_log { - let source_sec_header_name = source_sec_entry.get_section_header(elf_file, rela_entry.get_symbol_table_index() as usize) - .and_then(|s| s.get_name(elf_file)); - trace!(" --> Points to relevant section [{}]: {:?}", source_sec_shndx, source_sec_header_name); - trace!(" Entry name {} {:?} vis {:?} bind {:?} type {:?} shndx {} value {} size {}", - source_sec_entry.name(), source_sec_entry.get_name(elf_file), - source_sec_entry.get_other(), source_sec_entry.get_binding(), source_sec_entry.get_type(), - source_sec_entry.shndx(), source_sec_entry.value(), source_sec_entry.size()); - } - let demangled = demangle(source_sec_name).to_string(); - - // If the source section exists in this namespace already, rewrite the relocation entry to point to the existing section instead. - if let Some(existing_source_sec) = namespace.get_symbol_or_load(&demangled, None, mmi, verbose_log).upgrade() { - let mut relocation_entry = RelocationEntry::from_elf_relocation(rela_entry); - let original_relocation_offset = relocation_entry.offset; - - // Here, in an executable ELF file, the relocation entry's "offset" represents an absolute virtual address - // rather than an offset from the beginning of the section/segment (I think). - // Therefore, we need to adjust that value before we invoke `write_relocation()`, - // which expects a regular `offset` + an offset into the target segment's mapped pages. - let relocation_offset_as_vaddr = VirtualAddress::new(relocation_entry.offset).ok_or_else(|| - format!("relocation_entry.offset {:#X} was not a valid virtual address", relocation_entry.offset) - )?; - let offset_into_target_segment = relocation_offset_as_vaddr.value() - target_segment_start_addr.value(); - // Now that we have incorporated the relocation_entry's actual offset into the target_segment offset, - // we set it to zero for the duration of this call. - // TODO: this is hacky as hell, we should just create a new `write_relocation()` function instead. - relocation_entry.offset = 0; - - if verbose_log { - debug!(" Performing relocation target {:#X} + {:#X} <-- source {}", - target_segment_start_addr, offset_into_target_segment, existing_source_sec.name - ); - } - write_relocation( - relocation_entry, - target_segment_slice, - offset_into_target_segment, - existing_source_sec.virt_addr, - verbose_log - )?; - relocation_entry.offset = original_relocation_offset; - - // Here, we typically tell the existing_source_sec that the target_segment is dependent upon it. - // However, the `WeakDependent` entry type only accepts a weak section reference at the moment, - // and we don't have that -- we only have a target segment. - // TODO: if/when we wish to track weak dependencies from a section to a target segment, we should add it below. 
- // - // let weak_dep = WeakDependent { - // section: Arc::downgrade(&target_sec), - // relocation: relocation_entry, - // }; - // existing_source_sec.inner.write().sections_dependent_on_me.push(weak_dep); - - // tell the target_sec that it has a strong dependency on the existing_source_sec - let strong_dep = StrongDependency { - section: Arc::clone(&existing_source_sec), - relocation: relocation_entry, - }; - target_segment_dependencies.push(strong_dep); - } else { - trace!("Skipping relocation that points to non-Theseus section: {:?}", demangled); - } - } + let entry_offset = VirtualAddress::new(elf_file.header.pt2.entry_point() as usize) + .ok_or("invalid entry point address")?; + let entry_vaddr = entry_offset + file_start; - // debug!("Target segment dependencies: {:#X?}", target_segment_dependencies); - target_segment.sections_i_depend_on.append(&mut target_segment_dependencies); - } + debug!( + "ELF had entry point {:#X}, adjusted to {:#X}", + entry_offset, entry_vaddr + ); - Ok(()) + Ok((mapped_segments, entry_vaddr, elf_file)) } /// Converts the given ELF program flags into `PteFlags`. diff --git a/applications/test_executable/src/main.rs b/applications/test_executable/src/main.rs index e7a11a969c..3dc0c3b8d0 100644 --- a/applications/test_executable/src/main.rs +++ b/applications/test_executable/src/main.rs @@ -1,3 +1,5 @@ +#![feature(restricted_std)] + fn main() { println!("Hello, world!"); } diff --git a/kernel/crate_metadata/src/lib.rs b/kernel/crate_metadata/src/lib.rs index 683bf27cc4..28570f8a71 100644 --- a/kernel/crate_metadata/src/lib.rs +++ b/kernel/crate_metadata/src/lib.rs @@ -1123,6 +1123,13 @@ fn write_relocation_arch( if verbose_log { trace!(" target_ptr: {:p}, source_val: {:#X} (from source_sec_vaddr {:#X})", target_ref.as_ptr(), source_val, source_sec_vaddr); } target_ref.copy_from_slice(&source_val.to_ne_bytes()); } + // Used in PIE. + R_X86_64_RELATIVE => { + let target_range = target_sec_offset .. (target_sec_offset + size_of::()); + let target_ref = &mut target_sec_slice[target_range]; + let source_val = source_sec_vaddr.value().wrapping_add(relocation_entry.addend) as u64; + target_ref.copy_from_slice(&source_val.to_ne_bytes()); + } // R_X86_64_GOTTPOFF => { // // 32-bit signed PC-relative offset to the GOT entry for the IE (Initial Exec(utable) TLS model)) // debug!("R_X86_64_GOTTPOFF: {:#X?}", relocation_entry); diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index de015d3a6a..a18e5bd456 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -325,24 +325,24 @@ impl AllocatedPages

<P> {
     ) -> Result<(AllocatedPages<P>, AllocatedPages<P>), AllocatedPages<P>> {
         let end_of_first = at_page - 1;
-        let (first, second) = if at_page == *self.start() && at_page <= *self.end() {
-            let first = PageRange::<P>::new(at_page, *self.end());
-            let second = PageRange::<P>::empty();
-            (first, second)
-        }
-        else if at_page == (*self.end() + 1) && end_of_first >= *self.start() {
-            let first = PageRange::<P>::empty();
-            let second = PageRange::<P>::new(*self.start(), *self.end());
+        let (first, second) = if at_page == *self.start() && at_page <= *self.end() {
+            let first = PageRange::<P>::empty();
+            let second = PageRange::<P>::new(at_page, *self.end());
             (first, second)
-        }
-        else if at_page > *self.start() && end_of_first <= *self.end() {
-            let first = PageRange::<P>::new(*self.start(), end_of_first);
-            let second = PageRange::<P>::new(at_page, *self.end());
-            (first, second)
-        }
-        else {
-            return Err(self);
-        };
+        }
+        else if at_page == (*self.end() + 1) && end_of_first >= *self.start() {
+            let first = PageRange::<P>::new(*self.start(), *self.end());
+            let second = PageRange::<P>::empty();
+            (first, second)
+        }
+        else if at_page > *self.start() && end_of_first <= *self.end() {
+            let first = PageRange::<P>::new(*self.start(), end_of_first);
+            let second = PageRange::<P>::new(at_page, *self.end());
+            (first, second)
+        }
+        else {
+            return Err(self);
+        };
         // ensure the original AllocatedPages doesn't run its drop handler and free its pages.
         core::mem::forget(self);
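
A note on the `split` hunk above: with this change, splitting an allocation at its own start page returns an empty first half and a second half covering the entire original range, rather than the other way around. A minimal usage sketch of that intended behavior follows; it is illustrative only and assumes the `page_allocator::allocate_pages` and `size_in_pages` helpers, which are not shown in this patch.

    // Sketch (not part of the patch): expected behavior of the patched `split`
    // when splitting at the very first page of an allocation.
    fn split_at_start_demo() -> Result<(), &'static str> {
        // `allocate_pages` and `size_in_pages` are assumed APIs.
        let ap = page_allocator::allocate_pages(4).ok_or("out of virtual pages")?;
        let first_page = *ap.start();
        let (first, second) = ap.split(first_page).map_err(|_| "split failed")?;
        // `first` holds the pages *before* `at_page` (none here); `second` holds the rest.
        assert_eq!(first.size_in_pages(), 0);
        assert_eq!(second.size_in_pages(), 4);
        Ok(())
    }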
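
The new `R_X86_64_RELATIVE` arm added to `write_relocation_arch` in `crate_metadata` applies the standard `B + A` rule used by position-independent executables: the value written at the relocation target is the image's actual load address plus the entry's addend, with no symbol lookup involved. A standalone sketch of that calculation; the names here are illustrative and not from the patch.

    /// Sketch: resolving an R_X86_64_RELATIVE relocation for a PIE image.
    /// The relocated value is B + A (actual load base plus addend).
    fn resolve_x86_64_relative(load_base: u64, addend: i64, target: &mut [u8; 8]) {
        let value = load_base.wrapping_add(addend as u64);
        // Write in native endianness, matching the `to_ne_bytes` call in the patch.
        target.copy_from_slice(&value.to_ne_bytes());
    }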
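
Similarly, the mem_size/file_size rule that `parse_and_load_elf_executable` follows for each `PT_LOAD` segment (copy `file_size` bytes from the ELF image, then zero-fill the remainder, e.g. `.bss`) boils down to the following sketch; the function and parameter names are illustrative only.

    /// Sketch: copy a PT_LOAD segment's bytes and zero its .bss tail.
    /// `dest` must be `mem_size` bytes long; `segment_data` holds at least `file_size` bytes.
    fn copy_segment(dest: &mut [u8], segment_data: &[u8], file_size: usize) {
        dest[..file_size].copy_from_slice(&segment_data[..file_size]);
        // mem_size > file_size: the ELF carries no bytes for this tail, so zero it.
        dest[file_size..].fill(0);
    }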