Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor - Fine tunes inline hints in JIT #32

Merged
merged 2 commits into from
Feb 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 0 additions & 18 deletions src/jit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -844,7 +844,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
Ok(self.result)
}

#[inline]
fn should_sanitize_constant(&self, value: i64) -> bool {
if !self.config.sanitize_user_provided_values {
return false;
Expand All @@ -864,12 +863,10 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
/// Byte offset of `slot` within the runtime environment structure,
/// expressed relative to `runtime_environment_key`; each slot is 8 bytes.
fn slot_in_vm(&self, slot: RuntimeEnvironmentSlot) -> i32 {
    let slot_index = slot as i32 - self.runtime_environment_key;
    slot_index * 8
}

#[inline]
pub(crate) fn emit<T>(&mut self, data: T) {
unsafe {
let ptr = self.result.text_section.as_ptr().add(self.offset_in_text_section);
Expand All @@ -879,7 +876,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.offset_in_text_section += mem::size_of::<T>();
}

#[inline]
pub(crate) fn emit_variable_length(&mut self, size: OperandSize, data: u64) {
match size {
OperandSize::S0 => {},
Expand All @@ -903,7 +899,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
fn emit_sanitized_load_immediate(&mut self, destination: X86Register, value: i64) {
let lower_key = self.immediate_value_key as i32 as i64;
if value >= i32::MIN as i64 && value <= i32::MAX as i64 {
Expand All @@ -926,7 +921,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
fn emit_sanitized_alu(&mut self, size: OperandSize, opcode: u8, opcode_extension: u8, destination: X86Register, immediate: i64) {
if self.should_sanitize_constant(immediate) {
self.emit_sanitized_load_immediate(REGISTER_SCRATCH, immediate);
Expand All @@ -940,7 +934,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}

#[allow(dead_code)]
#[inline]
fn emit_stopwatch(&mut self, begin: bool) {
self.stopwatch_is_active = true;
self.emit_ins(X86Instruction::push(RDX, None));
Expand All @@ -960,7 +953,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_ins(X86Instruction::pop(RDX));
}

#[inline]
fn emit_validate_instruction_count(&mut self, pc: Option<usize>) {
if !self.config.enable_instruction_meter {
return;
Expand All @@ -975,7 +967,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_ins(X86Instruction::conditional_jump_immediate(0x86, self.relative_to_anchor(ANCHOR_THROW_EXCEEDED_MAX_INSTRUCTIONS, 6)));
}

#[inline]
fn emit_profile_instruction_count(&mut self, target_pc: Option<usize>) {
if !self.config.enable_instruction_meter {
return;
Expand All @@ -991,14 +982,12 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
/// Emits code that adjusts the instruction meter after a branch whose
/// target is already known: instruction_meter += (self.pc + 1) - target_pc.
///
/// No-op when the instruction meter is disabled in the config.
fn emit_undo_profile_instruction_count(&mut self, target_pc: usize) {
    if !self.config.enable_instruction_meter {
        return;
    }
    // instruction_meter += (self.pc + 1) - target_pc
    let correction = self.pc as i64 + 1 - target_pc as i64;
    self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, correction);
}

#[inline]
fn emit_validate_and_profile_instruction_count(&mut self, target_pc: Option<usize>) {
self.emit_validate_instruction_count(Some(self.pc));
self.emit_profile_instruction_count(target_pc);
Expand Down Expand Up @@ -1114,7 +1103,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
fn emit_internal_call(&mut self, dst: Value) {
// Store PC in case the bounds check fails
self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.pc as i64));
Expand Down Expand Up @@ -1156,15 +1144,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
/// Emits the dispatch sequence for a builtin (syscall) function:
/// meter validation/profiling, loading the builtin's address into the
/// scratch register, a call through the external-function-call anchor,
/// and the matching profiling undo (both use target_pc 0).
fn emit_syscall_dispatch(&mut self, function: BuiltinFunction<C>) {
    self.emit_validate_and_profile_instruction_count(Some(0));
    let function_address = function as usize as i64;
    self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, function_address));
    // 5 = encoded byte length of the call instruction emitted below.
    let call_displacement = self.relative_to_anchor(ANCHOR_EXTERNAL_FUNCTION_CALL, 5);
    self.emit_ins(X86Instruction::call_immediate(call_displacement));
    self.emit_undo_profile_instruction_count(0);
}

#[inline]
fn emit_address_translation(&mut self, dst: Option<X86Register>, vm_addr: Value, len: u64, value: Option<Value>) {
debug_assert_ne!(dst.is_some(), value.is_some());

Expand Down Expand Up @@ -1231,7 +1217,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
}
}

#[inline]
fn emit_conditional_branch_reg(&mut self, op: u8, bitwise: bool, first_operand: X86Register, second_operand: X86Register, target_pc: usize) {
self.emit_validate_and_profile_instruction_count(Some(target_pc));
if bitwise { // Logical
Expand All @@ -1245,7 +1230,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_undo_profile_instruction_count(target_pc);
}

#[inline]
fn emit_conditional_branch_imm(&mut self, op: u8, bitwise: bool, immediate: i64, second_operand: X86Register, target_pc: usize) {
self.emit_validate_and_profile_instruction_count(Some(target_pc));
if self.should_sanitize_constant(immediate) {
Expand Down Expand Up @@ -1666,15 +1650,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {

// instruction_length = 5 (Unconditional jump / call)
// instruction_length = 6 (Conditional jump)
#[inline]
/// Computes the signed 32-bit displacement from the end of the instruction
/// currently being emitted to the anchor at index `anchor`.
///
/// `instruction_length` is the byte length of the jump/call being encoded
/// (5 for unconditional jump / call, 6 for conditional jump — see the
/// comments at the call sites), because x86 relative offsets are measured
/// from the end of the instruction.
fn relative_to_anchor(&self, anchor: usize, instruction_length: usize) -> i32 {
    let destination = self.anchors[anchor];
    debug_assert!(!destination.is_null());
    // Address just past the instruction being emitted at the current offset.
    let instruction_end = unsafe {
        self.result
            .text_section
            .as_ptr()
            .add(self.offset_in_text_section + instruction_length)
    };
    // NOTE(review): `offset_from` requires both pointers to lie within the
    // same allocation — anchors are assumed to point into the text section.
    (unsafe { destination.offset_from(instruction_end) } as i32) // Relative jump
}

#[inline]
fn relative_to_target_pc(&mut self, target_pc: usize, instruction_length: usize) -> i32 {
let instruction_end = unsafe { self.result.text_section.as_ptr().add(self.offset_in_text_section).add(instruction_length) };
let destination = if self.result.pc_section[target_pc] != 0 {
Expand Down
33 changes: 1 addition & 32 deletions src/x86.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ impl X86Instruction {
immediate: 0,
};

#[inline]
#[inline(always)]
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the important bit

pub fn emit<C: ContextObject>(&self, jit: &mut JitCompiler<C>) {
debug_assert!(!matches!(self.size, OperandSize::S0));
let mut rex = X86Rex {
Expand Down Expand Up @@ -197,7 +197,6 @@ impl X86Instruction {
}

/// Arithmetic or logic
#[inline]
pub const fn alu(
size: OperandSize,
opcode: u8,
Expand All @@ -217,7 +216,6 @@ impl X86Instruction {
}

/// Arithmetic or logic
#[inline]
pub const fn alu_immediate(
size: OperandSize,
opcode: u8,
Expand Down Expand Up @@ -245,7 +243,6 @@ impl X86Instruction {
}

/// Move source to destination
#[inline]
pub const fn mov(size: OperandSize, source: X86Register, destination: X86Register) -> Self {
exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16);
Self {
Expand All @@ -258,7 +255,6 @@ impl X86Instruction {
}

/// Move source to destination
#[inline]
pub const fn mov_with_sign_extension(
size: OperandSize,
source: X86Register,
Expand All @@ -276,7 +272,6 @@ impl X86Instruction {

/// Move to / from / between MMX (float mantissa)
#[allow(dead_code)]
#[inline]
pub const fn mov_mmx(size: OperandSize, source: X86Register, destination: X86Register) -> Self {
exclude_operand_sizes!(
size,
Expand Down Expand Up @@ -305,7 +300,6 @@ impl X86Instruction {
}

/// Conditionally move source to destination
#[inline]
pub const fn cmov(
size: OperandSize,
condition: u8,
Expand All @@ -324,7 +318,6 @@ impl X86Instruction {
}

/// Swap source and destination
#[inline]
pub const fn xchg(
size: OperandSize,
source: X86Register,
Expand All @@ -346,7 +339,6 @@ impl X86Instruction {
}

/// Swap byte order of destination
#[inline]
pub const fn bswap(size: OperandSize, destination: X86Register) -> Self {
exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8);
match size {
Expand All @@ -371,7 +363,6 @@ impl X86Instruction {
}

/// Test source and destination
#[inline]
pub const fn test(
size: OperandSize,
source: X86Register,
Expand All @@ -394,7 +385,6 @@ impl X86Instruction {
}

/// Test immediate and destination
#[inline]
pub const fn test_immediate(
size: OperandSize,
destination: X86Register,
Expand Down Expand Up @@ -423,7 +413,6 @@ impl X86Instruction {
}

/// Compare source and destination
#[inline]
pub const fn cmp(
size: OperandSize,
source: X86Register,
Expand All @@ -446,7 +435,6 @@ impl X86Instruction {
}

/// Compare immediate and destination
#[inline]
pub const fn cmp_immediate(
size: OperandSize,
destination: X86Register,
Expand Down Expand Up @@ -475,7 +463,6 @@ impl X86Instruction {
}

/// Load effective address of source into destination
#[inline]
pub const fn lea(
size: OperandSize,
source: X86Register,
Expand All @@ -497,7 +484,6 @@ impl X86Instruction {
}

/// Convert word to doubleword or doubleword to quadword
#[inline]
pub const fn sign_extend_rax_rdx(size: OperandSize) -> Self {
exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16);
Self {
Expand All @@ -509,7 +495,6 @@ impl X86Instruction {
}

/// Load destination from [source + offset]
#[inline]
pub const fn load(
size: OperandSize,
source: X86Register,
Expand Down Expand Up @@ -540,7 +525,6 @@ impl X86Instruction {
}

/// Store source in [destination + offset]
#[inline]
pub const fn store(
size: OperandSize,
source: X86Register,
Expand All @@ -562,7 +546,6 @@ impl X86Instruction {
}

/// Load destination from immediate
#[inline]
pub const fn load_immediate(destination: X86Register, immediate: i64) -> Self {
let mut size = OperandSize::S64;
if immediate >= 0 {
Expand Down Expand Up @@ -594,7 +577,6 @@ impl X86Instruction {
}

/// Store sign-extended immediate in destination
#[inline]
pub const fn store_immediate(
size: OperandSize,
destination: X86Register,
Expand All @@ -621,7 +603,6 @@ impl X86Instruction {
}

/// Push source onto the stack
#[inline]
pub const fn push_immediate(size: OperandSize, immediate: i32) -> Self {
exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S16);
Self {
Expand All @@ -642,7 +623,6 @@ impl X86Instruction {
}

/// Push source onto the stack
#[inline]
pub const fn push(source: X86Register, indirect: Option<X86IndirectAccess>) -> Self {
if indirect.is_none() {
Self {
Expand All @@ -666,7 +646,6 @@ impl X86Instruction {
}

/// Pop from the stack into destination
#[inline]
pub const fn pop(destination: X86Register) -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -678,7 +657,6 @@ impl X86Instruction {
}

/// Jump to relative destination on condition
#[inline]
pub const fn conditional_jump_immediate(opcode: u8, relative_destination: i32) -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -692,7 +670,6 @@ impl X86Instruction {
}

/// Jump to relative destination
#[inline]
pub const fn jump_immediate(relative_destination: i32) -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -706,7 +683,6 @@ impl X86Instruction {

/// Jump to absolute destination
#[allow(dead_code)]
#[inline]
pub const fn jump_reg(destination: X86Register, indirect: Option<X86IndirectAccess>) -> Self {
Self {
size: OperandSize::S64,
Expand All @@ -719,7 +695,6 @@ impl X86Instruction {
}

/// Push RIP and jump to relative destination
#[inline]
pub const fn call_immediate(relative_destination: i32) -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -732,7 +707,6 @@ impl X86Instruction {
}

/// Push RIP and jump to absolute destination
#[inline]
pub const fn call_reg(destination: X86Register, indirect: Option<X86IndirectAccess>) -> Self {
Self {
size: OperandSize::S64,
Expand All @@ -745,7 +719,6 @@ impl X86Instruction {
}

/// Pop RIP
#[inline]
pub const fn return_near() -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -757,7 +730,6 @@ impl X86Instruction {

/// No operation
#[allow(dead_code)]
#[inline]
pub const fn noop() -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -769,7 +741,6 @@ impl X86Instruction {

/// Trap / software interrupt
#[allow(dead_code)]
#[inline]
pub const fn interrupt(immediate: u8) -> Self {
if immediate == 3 {
Self {
Expand All @@ -791,7 +762,6 @@ impl X86Instruction {
}

/// rdtsc
#[inline]
pub const fn cycle_count() -> Self {
Self {
size: OperandSize::S32,
Expand All @@ -804,7 +774,6 @@ impl X86Instruction {

/// lfence / sfence / mfence
#[allow(dead_code)]
#[inline]
pub const fn fence(fence_type: FenceType) -> Self {
Self {
size: OperandSize::S32,
Expand Down
Loading