Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 49 additions & 0 deletions bootstraptest/test_yjit.rb
Original file line number Diff line number Diff line change
Expand Up @@ -5486,3 +5486,52 @@ def test = foo(1)
test
test
}

# regression test for tracing invalidation with on-stack compiled methods
# Exercises the on_stack_iseqs path in rb_yjit_tracing_invalidate_all
# where delayed deallocation must not create aliasing &mut references
# to IseqPayload (use-after-free of version_map backing storage).
# Pass criterion: the process exits normally (no crash/abort) — the script's
# return value is irrelevant.
assert_normal_exit %q{
def deep = 42
def mid = deep
def outer = mid

# Compile all three methods with YJIT
10.times { outer }

# Enable tracing from within a call chain so that outer/mid/deep
# are on the stack when rb_yjit_tracing_invalidate_all runs.
# This triggers the on_stack_iseqs (delayed deallocation) path.
def deep
TracePoint.new(:line) {}.enable
42
end

outer

# After invalidation, verify YJIT can recompile and run correctly
def deep = 42
10.times { outer }
}

# regression test for tracing invalidation with on-stack fibers
# Suspended fibers have iseqs on their stack that must survive invalidation.
# Pass criterion: the script evaluates to '42' — the fiber resumes through
# the invalidated-then-still-live iseq and returns normally.
assert_equal '42', %q{
def compiled_method
Fiber.yield
42
end

# Compile the method
10.times { compiled_method rescue nil }

fiber = Fiber.new { compiled_method }
fiber.resume # suspends inside compiled_method — it's now on the fiber's stack

# Enable tracing while compiled_method is on the fiber's stack.
# This triggers rb_yjit_tracing_invalidate_all with on-stack iseqs.
TracePoint.new(:call) {}.enable

# Resume the fiber — compiled_method's iseq must still be valid
fiber.resume.to_s
}
3 changes: 2 additions & 1 deletion tool/zjit_bisect.rb
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,8 @@ def add_zjit_options cmd
cmd[run_opts_index] = "RUN_OPTS=#{run_opts.shelljoin}"
elsif specopts_index
specopts = Shellwords.split(cmd[specopts_index].delete_prefix("SPECOPTS="))
specopts.concat(zjit_opts)
# SPECOPTS needs -T before each option to pass it through mspec to Ruby
zjit_opts.each { |opt| specopts.concat(["-T", opt]) }
cmd[specopts_index] = "SPECOPTS=#{specopts.shelljoin}"
else
raise "Expected RUN_OPTS or SPECOPTS to be present in make command"
Expand Down
97 changes: 50 additions & 47 deletions yjit/src/core.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1769,6 +1769,35 @@ impl IseqPayload {
// Turn it into an iterator that owns the blocks and return
version_map.into_iter().flatten()
}

/// Get or create all blocks for a particular place in an iseq.
///
/// Grows the version map with empty lists as needed so that `insn_idx`
/// is always a valid index, then returns a mutable handle to its list.
fn get_or_create_version_list(&mut self, insn_idx: usize) -> &mut VersionList {
    // Expand the map on demand; indices past the end get empty lists.
    let required_len = insn_idx + 1;
    if self.version_map.len() < required_len {
        self.version_map.resize(required_len, VersionList::default());
    }

    &mut self.version_map[insn_idx]
}

// We cannot deallocate blocks immediately after invalidation since patching
// the code for setting up return addresses does not affect outstanding return
// addresses that are on stack and will use invalidated branch pointers when
// hit. Example:
// def foo(n)
//   if n == 2
//     # 1.times.each to create a cfunc frame to preserve the JIT frame
//     # which will return to a stub housed in an invalidated block
//     return 1.times.each { Object.define_method(:foo) {} }
//   end
//
//   foo(n + 1) # The block for this call houses the return branch stub
// end
// p foo(1)
pub fn delayed_deallocation(&mut self, blockref: BlockRef) {
    // Release invariant/assumption bookkeeping tied to this block first.
    block_assumptions_free(blockref);
    // SAFETY: assumes `blockref` still points to a live Block at this point —
    // TODO(review) confirm all callers guarantee this. Nulling the iseq marks
    // the block as dead (see Block::set_iseq_null).
    unsafe { blockref.as_ref() }.set_iseq_null();
    // Park the block on this payload's dead list; it is reclaimed later
    // (presumably during code GC — verify against dead_blocks consumers).
    self.dead_blocks.push(blockref);
}
}

/// Get the payload for an iseq. For safety it's up to the caller to ensure the returned `&mut`
Expand Down Expand Up @@ -2140,21 +2169,6 @@ fn get_version_list(blockid: BlockId) -> Option<&'static mut VersionList> {
}
}

/// Get or create all blocks for a particular place in an iseq.
fn get_or_create_version_list(blockid: BlockId) -> &'static mut VersionList {
let payload = get_or_create_iseq_payload(blockid.iseq);
let insn_idx = blockid.idx.as_usize();

// Expand the version map as necessary
if insn_idx >= payload.version_map.len() {
payload
.version_map
.resize(insn_idx + 1, VersionList::default());
}

return payload.version_map.get_mut(insn_idx).unwrap();
}

/// Take all of the blocks for a particular place in an iseq
pub fn take_version_list(blockid: BlockId) -> VersionList {
let insn_idx = blockid.idx.as_usize();
Expand Down Expand Up @@ -2343,15 +2357,21 @@ unsafe fn add_block_version(blockref: BlockRef, cb: &CodeBlock) {
// Function entry blocks must have stack size 0
debug_assert!(!(block.iseq_range.start == 0 && Context::decode(block.ctx).stack_size > 0));

let version_list = get_or_create_version_list(block.get_blockid());
// Use a single payload reference for both version_map and pages access
// to avoid mutable aliasing UB from multiple get_iseq_payload calls.
let iseq_payload = get_or_create_iseq_payload(block.iseq.get());
let insn_idx = block.get_blockid().idx.as_usize();

// If this the first block being compiled with this block id
if version_list.len() == 0 {
incr_counter!(compiled_blockid_count);
}
{
let version_list = iseq_payload.get_or_create_version_list(insn_idx);

version_list.push(blockref);
version_list.shrink_to_fit();
if version_list.is_empty() {
incr_counter!(compiled_blockid_count);
}

version_list.push(blockref);
version_list.shrink_to_fit();
}

// By writing the new block to the iseq, the iseq now
// contains new references to Ruby objects. Run write barriers.
Expand All @@ -2376,7 +2396,6 @@ unsafe fn add_block_version(blockref: BlockRef, cb: &CodeBlock) {
}

// Mark code pages for code GC
let iseq_payload = get_iseq_payload(block.iseq.get()).unwrap();
for page in cb.addrs_to_pages(block.start_addr, block.end_addr.get()) {
iseq_payload.pages.insert(page);
}
Expand Down Expand Up @@ -2495,6 +2514,11 @@ impl Block {
self.incoming.push(branch);
}

/// Mark this block as dead by setting its iseq to null.
/// A null iseq is the sentinel other code uses to recognize an
/// invalidated block awaiting delayed deallocation.
pub fn set_iseq_null(&self) {
    self.iseq.replace(ptr::null());
}

// Compute the size of the block code
pub fn code_size(&self) -> usize {
(self.end_addr.get().as_offset() - self.start_addr.as_offset()).try_into().unwrap()
Expand Down Expand Up @@ -4298,37 +4322,16 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
// in this function before we removed it, so it's well connected.
unsafe { remove_from_graph(*blockref) };

delayed_deallocation(*blockref);
if let Some(payload) = get_iseq_payload(id_being_invalidated.iseq) {
payload.delayed_deallocation(*blockref);
}

ocb.unwrap().mark_all_executable();
cb.mark_all_executable();

incr_counter!(invalidation_count);
}

// We cannot deallocate blocks immediately after invalidation since patching the code for setting
// up return addresses does not affect outstanding return addresses that are on stack and will use
// invalidated branch pointers when hit. Example:
// def foo(n)
// if n == 2
// # 1.times.each to create a cfunc frame to preserve the JIT frame
// # which will return to a stub housed in an invalidated block
// return 1.times.each { Object.define_method(:foo) {} }
// end
//
// foo(n + 1) # The block for this call houses the return branch stub
// end
// p foo(1)
pub fn delayed_deallocation(blockref: BlockRef) {
block_assumptions_free(blockref);

let block = unsafe { blockref.as_ref() };
// Set null ISEQ on the block to signal that it's dead.
let iseq = block.iseq.replace(ptr::null());
let payload = get_iseq_payload(iseq).unwrap();
payload.dead_blocks.push(blockref);
}

trait RefUnchecked {
type Contained;
unsafe fn ref_unchecked(&self) -> &Self::Contained;
Expand Down
2 changes: 1 addition & 1 deletion yjit/src/invariants.rs
Original file line number Diff line number Diff line change
Expand Up @@ -642,7 +642,7 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
if on_stack_iseqs.contains(&iseq) {
// This ISEQ is running, so we can't free blocks immediately
for block in blocks {
delayed_deallocation(block);
payload.delayed_deallocation(block);
}
payload.dead_blocks.shrink_to_fit();
} else {
Expand Down