don't grow heap unless it is paid for
joonazan committed Sep 26, 2024
1 parent 24de402 commit abc0225
Showing 3 changed files with 35 additions and 57 deletions.
52 changes: 19 additions & 33 deletions crates/vm2/src/instruction_handlers/far_call.rs
@@ -73,12 +73,7 @@ where
         abi.is_constructor_call,
     );
 
-    // calldata has to be constructed even if we already know we will panic because
-    // overflowing start + length makes the heap resize even when already panicking.
-    let already_failed = decommit_result.is_none() || IS_SHARD && abi.shard_id != 0;
-
-    let maybe_calldata =
-        get_far_call_calldata(raw_abi, raw_abi_is_pointer, vm, already_failed);
+    let maybe_calldata = get_far_call_calldata(raw_abi, raw_abi_is_pointer, vm);
 
     // mandated gas is passed even if it means transferring more than the 63/64 rule allows
     if let Some(gas_left) = vm.state.current_frame.gas.checked_sub(mandated_gas) {
@@ -90,6 +85,9 @@ where
         return None;
     };
 
+    if IS_SHARD && abi.shard_id != 0 {
+        return None;
+    }
     let calldata = maybe_calldata?;
     let (unpaid_decommit, is_evm) = decommit_result?;
     let program = vm.world_diff.pay_for_decommit(
@@ -188,7 +186,6 @@ pub(crate) fn get_far_call_calldata<T, W>(
     raw_abi: U256,
     is_pointer: bool,
     vm: &mut VirtualMachine<T, W>,
-    already_failed: bool,
 ) -> Option<FatPointer> {
     let mut pointer = FatPointer::from(raw_abi);
     #[allow(clippy::cast_possible_truncation)]
@@ -197,40 +194,29 @@
 
     match FatPointerSource::from_abi(raw_source) {
         FatPointerSource::ForwardFatPointer => {
-            if !is_pointer || pointer.offset > pointer.length || already_failed {
+            if !is_pointer || pointer.offset > pointer.length {
                 return None;
             }
 
             pointer.narrow();
         }
         FatPointerSource::MakeNewPointer(target) => {
-            if let Some(bound) = pointer.start.checked_add(pointer.length) {
-                if is_pointer || pointer.offset != 0 || already_failed {
-                    return None;
-                }
-                match target {
-                    FatPointerTarget::ToHeap => {
-                        grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
-                        pointer.memory_page = vm.state.current_frame.heap;
-                    }
-                    FatPointerTarget::ToAuxHeap => {
-                        grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
-                        pointer.memory_page = vm.state.current_frame.aux_heap;
-                    }
-                }
-            } else {
-                // The heap is grown even if the pointer goes out of the heap
-                // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
-                let bound = u32::MAX;
-                match target {
-                    FatPointerTarget::ToHeap => {
-                        grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
-                    }
-                    FatPointerTarget::ToAuxHeap => {
-                        grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
-                    }
-                }
-                return None;
-            }
+            if is_pointer || pointer.offset != 0 {
+                return None;
+            }
+            let bound = pointer
+                .start
+                .checked_add(pointer.length)
+                .unwrap_or(u32::MAX);
+            match target {
+                FatPointerTarget::ToHeap => {
+                    grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
+                    pointer.memory_page = vm.state.current_frame.heap;
+                }
+                FatPointerTarget::ToAuxHeap => {
+                    grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
+                    pointer.memory_page = vm.state.current_frame.aux_heap;
+                }
+            }
         }
     }
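The interesting case in this hunk is an overflowing start + length: the old code handled it on a dedicated failure branch that still grew the heap (the PLA-974 workaround), while the new code lets the bound saturate to u32::MAX and sends it through the same paid grow_heap call. Below is a minimal sketch of that bound computation, outside the vm2 codebase; the helper name is made up for illustration.

/// Stand-in for the bound computation in the new MakeNewPointer arm.
fn requested_bound(start: u32, length: u32) -> u32 {
    start.checked_add(length).unwrap_or(u32::MAX)
}

fn main() {
    // In-range request: grow exactly to start + length.
    assert_eq!(requested_bound(1 << 20, 64), (1 << 20) + 64);
    // Overflowing request: no special-cased free growth any more; the caller
    // now asks for u32::MAX bytes, which grow_heap charges for, so the frame
    // runs out of gas instead of getting an unpaid heap resize.
    assert_eq!(requested_bound(u32::MAX - 10, 64), u32::MAX);
}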
38 changes: 15 additions & 23 deletions crates/vm2/src/instruction_handlers/heap_access.rs
@@ -74,22 +74,19 @@ fn load<T: Tracer, W, H: HeapFromState, In: Source, const INCREMENT: bool>(
     // They will panic, though because they are larger than 2^32.
     let (pointer, _) = In::get_with_pointer_flag(args, &mut vm.state);
 
-    let address = pointer.low_u32();
+    if bigger_than_last_address(pointer) {
+        let _ = vm.state.use_gas(u32::MAX);
+        vm.state.current_frame.pc = spontaneous_panic();
+        return;
+    }
 
+    let address = pointer.low_u32();
     let new_bound = address.wrapping_add(32);
     if grow_heap::<_, _, H>(&mut vm.state, new_bound).is_err() {
         vm.state.current_frame.pc = spontaneous_panic();
         return;
     };
 
-    // The heap is always grown even when the index nonsensical.
-    // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
-    if bigger_than_last_address(pointer) {
-        let _ = vm.state.use_gas(u32::MAX);
-        vm.state.current_frame.pc = spontaneous_panic();
-        return;
-    }
-
     let heap = H::get_heap(&vm.state);
     let value = vm.state.heaps[heap].read_u256(address);
     Register1::set(args, &mut vm.state, value);
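Note on the reordering in load (mirrored in store below): the bigger_than_last_address check now runs before grow_heap, so an out-of-range pointer burns the frame's gas without the heap being grown or charged, and wrapping_add(32) is only reached for addresses the check has already admitted. The tiny standalone sketch below shows plain wrapping_add behaviour, nothing vm2-specific, to illustrate why the order matters.

fn main() {
    // For an address near the top of the 32-bit space, the naive new_bound
    // wraps around to a tiny value, so growing the heap first (as the old
    // code did, per the removed TODO) could grow and charge for a bound
    // unrelated to the actual out-of-range access.
    let address: u32 = u32::MAX - 10;
    let new_bound = address.wrapping_add(32);
    assert_eq!(new_bound, 21);
}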
@@ -115,6 +112,12 @@ where
     // They will panic, though because they are larger than 2^32.
     let (pointer, _) = In::get_with_pointer_flag(args, &mut vm.state);
 
+    if bigger_than_last_address(pointer) {
+        let _ = vm.state.use_gas(u32::MAX);
+        vm.state.current_frame.pc = spontaneous_panic();
+        return ExecutionStatus::Running;
+    }
+
     let address = pointer.low_u32();
     let value = Register2::get(args, &mut vm.state);
 
@@ -124,14 +127,6 @@
         return ExecutionStatus::Running;
     }
 
-    // The heap is always grown even when the index nonsensical.
-    // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
-    if bigger_than_last_address(pointer) {
-        let _ = vm.state.use_gas(u32::MAX);
-        vm.state.current_frame.pc = spontaneous_panic();
-        return ExecutionStatus::Running;
-    }
-
     let heap = H::get_heap(&vm.state);
     vm.state.heaps.write_u256(heap, address, value);
 
@@ -153,13 +148,10 @@ pub(crate) fn grow_heap<T, W, H: HeapFromState>(
     state: &mut State<T, W>,
     new_bound: u32,
 ) -> Result<(), ()> {
-    let already_paid = H::get_heap_size(state);
-    if *already_paid < new_bound {
-        let to_pay = new_bound - *already_paid;
-        *already_paid = new_bound;
-        state.use_gas(to_pay)?;
-    }
+    let to_pay = new_bound.saturating_sub(*H::get_heap_size(state));
+    state.use_gas(to_pay)?;
 
+    *H::get_heap_size(state) = new_bound;
     Ok(())
 }
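The grow_heap rewrite is the heart of the commit title: gas is charged before the recorded heap size moves, so a failed payment leaves the heap untouched. Below is a simplified model of that accounting; Frame and its use_gas are stand-ins rather than the vm2 types, and the assumption that a failed payment zeroes the remaining gas is this sketch's, chosen to mirror out-of-gas behaviour.

struct Frame {
    gas: u32,
    heap_size: u32, // bytes of heap already paid for
}

impl Frame {
    // Sketch assumption: failing to pay consumes the rest of the gas.
    fn use_gas(&mut self, amount: u32) -> Result<(), ()> {
        if self.gas < amount {
            self.gas = 0;
            return Err(());
        }
        self.gas -= amount;
        Ok(())
    }

    // Mirrors the new grow_heap: pay first, record the new bound only on success.
    fn grow_heap(&mut self, new_bound: u32) -> Result<(), ()> {
        let to_pay = new_bound.saturating_sub(self.heap_size);
        self.use_gas(to_pay)?;
        self.heap_size = new_bound;
        Ok(())
    }
}

fn main() {
    let mut frame = Frame { gas: 100, heap_size: 1000 };

    // Growing by 60 bytes costs 60 gas and advances the paid-for bound.
    assert!(frame.grow_heap(1060).is_ok());
    assert_eq!((frame.gas, frame.heap_size), (40, 1060));

    // Growing past what the remaining gas covers fails, and, unlike the old
    // code (which advanced the size before use_gas could fail), the recorded
    // heap bound stays where it was.
    assert!(frame.grow_heap(2000).is_err());
    assert_eq!(frame.heap_size, 1060);
}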

2 changes: 1 addition & 1 deletion crates/vm2/src/instruction_handlers/ret.rs
@@ -43,7 +43,7 @@ fn naked_ret<T: Tracer, W, RT: TypeLevelReturnType, const TO_LABEL: bool>(
         None
     } else {
         let (raw_abi, is_pointer) = Register1::get_with_pointer_flag(args, &mut vm.state);
-        let result = get_far_call_calldata(raw_abi, is_pointer, vm, false).filter(|pointer| {
+        let result = get_far_call_calldata(raw_abi, is_pointer, vm).filter(|pointer| {
             vm.state.current_frame.is_kernel
                 || pointer.memory_page != vm.state.current_frame.calldata_heap
         });
