YJIT: Avoid memory write for leave return value
XrXr committed Oct 2, 2023
1 parent 492e943 commit e72c8da
Showing 4 changed files with 97 additions and 29 deletions.
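
The change in a nutshell: gen_leave() used to reload the caller's SP and write the return value into stack memory on every return; it now leaves the value in the C return register, and each place a return can land (gen_leave_exit(), gen_leave_exception(), interpreter exits, and the return context from gen_send_iseq()) writes it to the VM stack only when actually needed. A standalone model of the two protocols (plain Rust, illustrative names only, no YJIT types):

    struct Frame {
        sp: usize,       // index of the next free stack slot
        stack: Vec<u64>, // the caller's value stack
    }

    // Old protocol: `leave` itself stores the return value through the
    // caller's SP, costing a memory write on every return.
    fn leave_old(caller: &mut Frame, retval: u64) {
        caller.stack[caller.sp] = retval;
        caller.sp += 1;
    }

    // New protocol: `leave` hands the value back in a register (modeled by
    // the return value here) and jumps; the landing site spills it only if
    // the continuation actually needs it on the VM stack.
    fn leave_new(retval: u64) -> u64 {
        retval
    }

    fn return_landing(caller: &mut Frame, c_ret: u64) {
        caller.stack[caller.sp] = c_ret;
        caller.sp += 1;
    }

    fn main() {
        let mut caller = Frame { sp: 0, stack: vec![0; 8] };
        leave_old(&mut caller, 42);         // old: unconditional memory write
        let c_ret = leave_new(7);           // new: value rides in a register...
        return_landing(&mut caller, c_ret); // ...until a landing site pushes it
        assert_eq!(caller.stack[0], 42);
        assert_eq!(caller.stack[1], 7);
    }
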
KNOWNBUGS.rb: 4 additions & 0 deletions
@@ -5,3 +5,7 @@
# This test file includes tests which point out known bugs.
# So all tests will cause failure.
#
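# Note: with correct semantics this prints "1" (Integer#times returns its
# receiver); it is listed here because it currently fails.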
print(begin
ans = []
1.times { }
end)
yjit/src/backend/ir.rs: 5 additions & 0 deletions
Expand Up @@ -955,6 +955,7 @@ pub struct SideExitContext {
pub stack_size: u8,
pub sp_offset: i8,
pub reg_temps: RegTemps,
pub is_return_landing: bool,
}

impl SideExitContext {
@@ -965,6 +966,7 @@ impl SideExitContext {
stack_size: ctx.get_stack_size(),
sp_offset: ctx.get_sp_offset(),
reg_temps: ctx.get_reg_temps(),
is_return_landing: ctx.is_return_landing(),
};
if cfg!(debug_assertions) {
// Assert that we're not losing any mandatory metadata
@@ -979,6 +981,9 @@ impl SideExitContext {
ctx.set_stack_size(self.stack_size);
ctx.set_sp_offset(self.sp_offset);
ctx.set_reg_temps(self.reg_temps);
if self.is_return_landing {
ctx.set_as_return_landing();
}
ctx
}
}
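
The new field means a Context compressed into a SideExitContext and rebuilt via get_ctx() must round-trip is_return_landing, or a side exit would be generated with the wrong stack shape; that is what the debug assertion above guards. A minimal standalone model of the round-trip property (mock types, not the real ones):

    #[derive(Clone, Copy, Debug, Default, PartialEq)]
    struct Ctx {
        stack_size: u8,
        sp_offset: i8,
        is_return_landing: bool,
    }

    // Compressed per-exit metadata, mirroring SideExitContext's role.
    struct SideExitCtx {
        stack_size: u8,
        sp_offset: i8,
        is_return_landing: bool,
    }

    impl SideExitCtx {
        fn new(ctx: &Ctx) -> Self {
            SideExitCtx {
                stack_size: ctx.stack_size,
                sp_offset: ctx.sp_offset,
                is_return_landing: ctx.is_return_landing,
            }
        }

        fn get_ctx(&self) -> Ctx {
            Ctx {
                stack_size: self.stack_size,
                sp_offset: self.sp_offset,
                is_return_landing: self.is_return_landing,
            }
        }
    }

    fn main() {
        let ctx = Ctx { stack_size: 2, sp_offset: 1, is_return_landing: true };
        // No mandatory metadata may be lost in the compressed form.
        assert_eq!(SideExitCtx::new(&ctx).get_ctx(), ctx);
    }
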
yjit/src/codegen.rs: 32 additions & 15 deletions
@@ -447,6 +447,12 @@ fn gen_exit(exit_pc: *mut VALUE, asm: &mut Assembler) {
asm_comment!(asm, "exit to interpreter on {}", insn_name(opcode as usize));
}

if asm.ctx.is_return_landing() {
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
let top = asm.stack_push(Type::Unknown);
asm.mov(top, C_RET_OPND);
}

// Spill stack temps before returning to the interpreter
asm.spill_temps();

@@ -639,13 +645,18 @@ fn gen_leave_exception(ocb: &mut OutlinedCb) -> CodePtr {
let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();

// gen_leave() leaves the return value in C_RET_OPND before coming here.
let ruby_ret_val = asm.live_reg_opnd(C_RET_OPND);

// Every exit to the interpreter should be counted
gen_counter_incr(&mut asm, Counter::leave_interp_return);

asm_comment!(asm, "increment SP of the caller");
let sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
asm_comment!(asm, "push return value through cfp->sp");
let cfp_sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
let sp = asm.load(cfp_sp);
asm.mov(Opnd::mem(64, sp, 0), ruby_ret_val);
let new_sp = asm.add(sp, SIZEOF_VALUE.into());
asm.mov(sp, new_sp);
asm.mov(cfp_sp, new_sp);

asm_comment!(asm, "exit from exception");
asm.cpop_into(SP);
@@ -875,6 +886,18 @@ pub fn gen_single_block(
asm_comment!(asm, "reg_temps: {:08b}", asm.ctx.get_reg_temps().as_u8());
}

if asm.ctx.is_return_landing() {
// Continuation of the end of gen_leave().
// Reload REG_SP for the current frame and transfer the return value
// to the stack top.
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));

let top = asm.stack_push(Type::Unknown);
asm.mov(top, C_RET_OPND);

asm.ctx.clear_return_landing();
}

// For each instruction to compile
// NOTE: could rewrite this loop with a std::iter::Iterator
while insn_idx < iseq_size {
@@ -6526,17 +6549,14 @@ fn gen_send_iseq(
// The callee might change locals through Kernel#binding and other means.
asm.ctx.clear_local_types();

// Pop arguments and receiver in return context, push the return value
// After the return, sp_offset will be 1. The codegen for leave writes
// the return value in case of JIT-to-JIT return.
// Pop arguments and receiver in return context and
// mark it as a continuation of gen_leave()
let mut return_asm = Assembler::new();
return_asm.ctx = asm.ctx.clone();
return_asm.stack_pop(sp_offset.try_into().unwrap());
let return_val = return_asm.stack_push(Type::Unknown);
// The callee writes a return value on stack. Update reg_temps accordingly.
return_asm.ctx.dealloc_temp_reg(return_val.stack_idx());
return_asm.ctx.set_sp_offset(1);
return_asm.ctx.set_sp_offset(0); // We set SP on the caller's frame above
return_asm.ctx.reset_chain_depth();
return_asm.ctx.set_as_return_landing();

// Write the JIT return address on the callee frame
gen_branch(
@@ -7739,12 +7759,9 @@ fn gen_leave(
// Move the return value into the C return register for gen_leave_exit()
asm.mov(C_RET_OPND, retval_opnd);

// Reload REG_SP for the caller and write the return value.
// Top of the stack is REG_SP[0] since the caller has sp_offset=1.
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
asm.mov(Opnd::mem(64, SP, 0), C_RET_OPND);

// Jump to the JIT return address on the frame that was just popped
// Possible continuations: gen_leave_exit(), gen_leave_exception(), or the
// return context set up by gen_send_iseq() (which may still be a stub).
let offset_to_jit_return =
-(RUBY_SIZEOF_CONTROL_FRAME as i32) + RUBY_OFFSET_CFP_JIT_RETURN;
asm.jmp_opnd(Opnd::mem(64, CFP, offset_to_jit_return));
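
The three-instruction landing sequence above now appears verbatim in gen_exit(), at the top of gen_single_block(), and in gen_branch_stub(). Factored into a helper it would look like the sketch below (hypothetical: the patch inlines it at each site; the types are YJIT's own):

    // Hypothetical helper, not part of the patch. Materializes the return
    // value of a JIT-to-JIT return at a landing site.
    fn gen_return_landing(asm: &mut Assembler) {
        // gen_leave() already popped the callee frame, so cfp->sp is the
        // caller's stack pointer and the return value is in C_RET_OPND.
        asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
        let top = asm.stack_push(Type::Unknown);
        asm.mov(top, C_RET_OPND);
    }
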
yjit/src/core.rs: 56 additions & 14 deletions
@@ -450,8 +450,11 @@ pub struct Context {
/// Bitmap of which stack temps are in a register
reg_temps: RegTemps,

// Depth of this block in the sidechain (eg: inline-cache chain)
chain_depth: u8,
/// Fields packed into u8
/// - Lower 7 bits: Depth of this block in the sidechain (eg: inline-cache chain)
/// - Top bit: Whether this code is the target of a JIT-to-JIT Ruby return
/// ([Self::is_return_landing])
chain_depth_plus: u8,

// Local variable types we keep track of
local_types: [Type; MAX_LOCAL_TYPES],
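
Packing the flag into the old chain_depth byte keeps Context the same size: the low 7 bits still hold the depth and the top bit holds the return-landing flag. A standalone sketch of the same masks and accessors (mirroring, not importing, the code below):

    #[derive(Default)]
    struct Packed {
        chain_depth_plus: u8, // low 7 bits: chain depth; top bit: return landing
    }

    impl Packed {
        fn get_chain_depth(&self) -> u8 {
            self.chain_depth_plus & 0x7f
        }

        fn increment_chain_depth(&mut self) {
            // 0x7f is the largest depth 7 bits can hold; incrementing past
            // it would carry into the flag bit.
            if self.get_chain_depth() == 0x7f {
                panic!("max block version chain depth reached!");
            }
            self.chain_depth_plus += 1;
        }

        fn set_as_return_landing(&mut self) {
            self.chain_depth_plus |= 0x80;
        }

        fn is_return_landing(&self) -> bool {
            self.chain_depth_plus & 0x80 != 0
        }
    }

    fn main() {
        let mut p = Packed::default();
        p.set_as_return_landing();
        p.increment_chain_depth();
        assert_eq!(p.get_chain_depth(), 1);
        assert!(p.is_return_landing()); // the flag survives depth updates
    }
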
Expand Down Expand Up @@ -1399,7 +1402,7 @@ fn find_block_version(blockid: BlockId, ctx: &Context) -> Option<BlockRef> {
/// Produce a generic context when the block version limit is hit for a blockid
pub fn limit_block_versions(blockid: BlockId, ctx: &Context) -> Context {
// Guard chains implement limits separately, do nothing
if ctx.chain_depth > 0 {
if ctx.get_chain_depth() > 0 {
return ctx.clone();
}

@@ -1607,6 +1610,9 @@ impl Context {
generic_ctx.stack_size = self.stack_size;
generic_ctx.sp_offset = self.sp_offset;
generic_ctx.reg_temps = self.reg_temps;
if self.is_return_landing() {
generic_ctx.set_as_return_landing();
}
generic_ctx
}

@@ -1637,15 +1643,30 @@ impl Context {
}

pub fn get_chain_depth(&self) -> u8 {
self.chain_depth
self.chain_depth_plus & 0x7f
}

pub fn reset_chain_depth(&mut self) {
self.chain_depth = 0;
self.chain_depth_plus &= 0x80;
}

pub fn increment_chain_depth(&mut self) {
self.chain_depth += 1;
if self.get_chain_depth() == 0x7f {
panic!("max block version chain depth reached!");
}
self.chain_depth_plus += 1;
}

pub fn set_as_return_landing(&mut self) {
self.chain_depth_plus |= 0x80;
}

pub fn clear_return_landing(&mut self) {
self.chain_depth_plus &= 0x7f;
}

pub fn is_return_landing(&self) -> bool {
self.chain_depth_plus & 0x80 > 0
}

/// Get an operand for the adjusted stack pointer address
Expand Down Expand Up @@ -1842,13 +1863,17 @@ impl Context {
let src = self;

// Can only look up the first version in the chain
if dst.chain_depth != 0 {
if dst.get_chain_depth() != 0 {
return TypeDiff::Incompatible;
}

// Blocks with depth > 0 always produce new versions
// Sidechains cannot overlap
if src.chain_depth != 0 {
if src.get_chain_depth() != 0 {
return TypeDiff::Incompatible;
}

if src.is_return_landing() != dst.is_return_landing() {
return TypeDiff::Incompatible;
}

@@ -2495,6 +2520,9 @@ fn branch_stub_hit_body(branch_ptr: *const c_void, target_idx: u32, ec: EcPtr) -
let running_iseq = rb_cfp_get_iseq(cfp);
let reconned_pc = rb_iseq_pc_at_idx(running_iseq, target_blockid.idx.into());
let reconned_sp = original_interp_sp.offset(target_ctx.sp_offset.into());
// Unlike in the interpreter, our `leave` doesn't write to the caller's
// SP—we do it in the returned-to code. Account for this difference.
let reconned_sp = reconned_sp.add(target_ctx.is_return_landing().into());

assert_eq!(running_iseq, target_blockid.iseq as _, "each stub expects a particular iseq");
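
Since leave no longer writes the slot, the interpreter SP that branch_stub_hit() reconstructs must count the return value's slot itself when the target is a return landing. A standalone model of that adjustment, in slot units (names are illustrative; the real code does pointer arithmetic on VALUE*):

    fn reconned_sp_slots(interp_sp: usize, sp_offset: i8, is_return_landing: bool) -> usize {
        let sp = (interp_sp as isize + sp_offset as isize) as usize;
        // Our `leave` keeps the return value in a register, so the slot the
        // interpreter expects it in has not been written yet; count it here.
        sp + usize::from(is_return_landing)
    }

    fn main() {
        assert_eq!(reconned_sp_slots(10, 0, false), 10);
        assert_eq!(reconned_sp_slots(10, 0, true), 11); // one extra slot reserved
    }
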

@@ -2631,11 +2659,20 @@ fn gen_branch_stub(
asm.set_reg_temps(ctx.reg_temps);
asm_comment!(asm, "branch stub hit");

if asm.ctx.is_return_landing() {
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
let top = asm.stack_push(Type::Unknown);
asm.mov(top, C_RET_OPND);
}

// Save caller-saved registers before C_ARG_OPNDS get clobbered.
// Spill all registers for consistency with the trampoline.
for &reg in caller_saved_temp_regs().iter() {
asm.cpush(reg);
}
// Also save C_RET_REG, used by gen_leave(). It is pushed twice; the
// second push likely keeps the C stack 16-byte aligned, and the
// trampoline pops it twice to match.
asm.cpush(C_RET_OPND);
asm.cpush(C_RET_OPND);

// Spill temps to the VM stack as well for jit.peek_at_stack()
asm.spill_temps();
@@ -2683,14 +2720,22 @@ pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> CodePtr {
EC,
]
);
let jump_addr2 = asm.load(jump_addr);

// Restore the return reg, popping twice to mirror the two pushes
// in gen_branch_stub()
asm.cpop_into(C_RET_OPND);
asm.cpop_into(C_RET_OPND);
// Restore caller-saved registers for stack temps
for &reg in caller_saved_temp_regs().iter().rev() {
asm.cpop_into(reg);
}

// Jump to the address returned by the branch_stub_hit() call
asm.jmp_opnd(jump_addr);
asm.jmp_opnd(jump_addr2);

// HACK: maintain liveness of the C return reg so we get a scratch register
// for keeping the return value of branch_stub_hit(). The test itself never
// runs (it follows an unconditional jump); it only extends jump_addr's live
// range past the pops above so that jump_addr2 is not allocated to the C
// return register they clobber.
asm.test(jump_addr, 0usize.into());

asm.compile(ocb, None);

@@ -2831,16 +2876,13 @@ pub fn defer_compilation(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) {
if asm.ctx.chain_depth != 0 {
if asm.ctx.get_chain_depth() != 0 {
panic!("Double defer!");
}

let mut next_ctx = asm.ctx.clone();

if next_ctx.chain_depth == u8::MAX {
panic!("max block version chain depth reached!");
}
next_ctx.chain_depth += 1;
next_ctx.increment_chain_depth();

let branch = new_pending_branch(jit, BranchGenFn::JumpToTarget0(Cell::new(BranchShape::Default)));

