diff --git a/crates/vm2/src/instruction_handlers/far_call.rs b/crates/vm2/src/instruction_handlers/far_call.rs
index 1fc8055..881c53f 100644
--- a/crates/vm2/src/instruction_handlers/far_call.rs
+++ b/crates/vm2/src/instruction_handlers/far_call.rs
@@ -73,12 +73,7 @@ where
         abi.is_constructor_call,
     );
 
-    // calldata has to be constructed even if we already know we will panic because
-    // overflowing start + length makes the heap resize even when already panicking.
-    let already_failed = decommit_result.is_none() || IS_SHARD && abi.shard_id != 0;
-
-    let maybe_calldata =
-        get_far_call_calldata(raw_abi, raw_abi_is_pointer, vm, already_failed);
+    let maybe_calldata = get_far_call_calldata(raw_abi, raw_abi_is_pointer, vm);
 
     // mandated gas is passed even if it means transferring more than the 63/64 rule allows
     if let Some(gas_left) = vm.state.current_frame.gas.checked_sub(mandated_gas) {
@@ -90,6 +85,9 @@ where
         return None;
     };
 
+    if IS_SHARD && abi.shard_id != 0 {
+        return None;
+    }
     let calldata = maybe_calldata?;
     let (unpaid_decommit, is_evm) = decommit_result?;
     let program = vm.world_diff.pay_for_decommit(
@@ -188,7 +186,6 @@ pub(crate) fn get_far_call_calldata(
     raw_abi: U256,
     is_pointer: bool,
     vm: &mut VirtualMachine<T, W>,
-    already_failed: bool,
 ) -> Option<FatPointer> {
     let mut pointer = FatPointer::from(raw_abi);
     #[allow(clippy::cast_possible_truncation)]
@@ -197,40 +194,29 @@ pub(crate) fn get_far_call_calldata(
 
     match FatPointerSource::from_abi(raw_source) {
         FatPointerSource::ForwardFatPointer => {
-            if !is_pointer || pointer.offset > pointer.length || already_failed {
+            if !is_pointer || pointer.offset > pointer.length {
                 return None;
             }
 
             pointer.narrow();
         }
         FatPointerSource::MakeNewPointer(target) => {
-            if let Some(bound) = pointer.start.checked_add(pointer.length) {
-                if is_pointer || pointer.offset != 0 || already_failed {
-                    return None;
-                }
-                match target {
-                    FatPointerTarget::ToHeap => {
-                        grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
-                        pointer.memory_page = vm.state.current_frame.heap;
-                    }
-                    FatPointerTarget::ToAuxHeap => {
-                        grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
-                        pointer.memory_page = vm.state.current_frame.aux_heap;
-                    }
+            if is_pointer || pointer.offset != 0 {
+                return None;
+            }
+            let bound = pointer
+                .start
+                .checked_add(pointer.length)
+                .unwrap_or(u32::MAX);
+            match target {
+                FatPointerTarget::ToHeap => {
+                    grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
+                    pointer.memory_page = vm.state.current_frame.heap;
                 }
-            } else {
-                // The heap is grown even if the pointer goes out of the heap
-                // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
-                let bound = u32::MAX;
-                match target {
-                    FatPointerTarget::ToHeap => {
-                        grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
-                    }
-                    FatPointerTarget::ToAuxHeap => {
-                        grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
-                    }
+                FatPointerTarget::ToAuxHeap => {
+                    grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
+                    pointer.memory_page = vm.state.current_frame.aux_heap;
                 }
-                return None;
             }
         }
     }
diff --git a/crates/vm2/src/instruction_handlers/heap_access.rs b/crates/vm2/src/instruction_handlers/heap_access.rs
index 7218fc9..0e85897 100644
--- a/crates/vm2/src/instruction_handlers/heap_access.rs
+++ b/crates/vm2/src/instruction_handlers/heap_access.rs
@@ -74,22 +74,19 @@ fn load(
 
         // They will panic, though because they are larger than 2^32.
         let (pointer, _) = In::get_with_pointer_flag(args, &mut vm.state);
-        let address = pointer.low_u32();
+        if bigger_than_last_address(pointer) {
+            let _ = vm.state.use_gas(u32::MAX);
+            vm.state.current_frame.pc = spontaneous_panic();
+            return;
+        }
 
+        let address = pointer.low_u32();
         let new_bound = address.wrapping_add(32);
         if grow_heap::<_, _, H>(&mut vm.state, new_bound).is_err() {
             vm.state.current_frame.pc = spontaneous_panic();
             return;
         };
 
-        // The heap is always grown even when the index nonsensical.
-        // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
-        if bigger_than_last_address(pointer) {
-            let _ = vm.state.use_gas(u32::MAX);
-            vm.state.current_frame.pc = spontaneous_panic();
-            return;
-        }
-
         let heap = H::get_heap(&vm.state);
         let value = vm.state.heaps[heap].read_u256(address);
         Register1::set(args, &mut vm.state, value);
@@ -115,6 +112,12 @@ where
         // They will panic, though because they are larger than 2^32.
         let (pointer, _) = In::get_with_pointer_flag(args, &mut vm.state);
 
+        if bigger_than_last_address(pointer) {
+            let _ = vm.state.use_gas(u32::MAX);
+            vm.state.current_frame.pc = spontaneous_panic();
+            return ExecutionStatus::Running;
+        }
+
         let address = pointer.low_u32();
         let value = Register2::get(args, &mut vm.state);
 
@@ -124,14 +127,6 @@ where
             return ExecutionStatus::Running;
         }
 
-        // The heap is always grown even when the index nonsensical.
-        // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
-        if bigger_than_last_address(pointer) {
-            let _ = vm.state.use_gas(u32::MAX);
-            vm.state.current_frame.pc = spontaneous_panic();
-            return ExecutionStatus::Running;
-        }
-
         let heap = H::get_heap(&vm.state);
         vm.state.heaps.write_u256(heap, address, value);
 
@@ -153,13 +148,10 @@ pub(crate) fn grow_heap(
     state: &mut State<T, W>,
     new_bound: u32,
 ) -> Result<(), ()> {
-    let already_paid = H::get_heap_size(state);
-    if *already_paid < new_bound {
-        let to_pay = new_bound - *already_paid;
-        *already_paid = new_bound;
-        state.use_gas(to_pay)?;
-    }
+    let to_pay = new_bound.saturating_sub(*H::get_heap_size(state));
+    state.use_gas(to_pay)?;
+    *H::get_heap_size(state) = new_bound;
 
     Ok(())
 }
diff --git a/crates/vm2/src/instruction_handlers/ret.rs b/crates/vm2/src/instruction_handlers/ret.rs
index 71d51b5..42b7014 100644
--- a/crates/vm2/src/instruction_handlers/ret.rs
+++ b/crates/vm2/src/instruction_handlers/ret.rs
@@ -43,7 +43,7 @@ fn naked_ret(
         None
     } else {
         let (raw_abi, is_pointer) = Register1::get_with_pointer_flag(args, &mut vm.state);
-        let result = get_far_call_calldata(raw_abi, is_pointer, vm, false).filter(|pointer| {
+        let result = get_far_call_calldata(raw_abi, is_pointer, vm).filter(|pointer| {
             vm.state.current_frame.is_kernel
                 || pointer.memory_page != vm.state.current_frame.calldata_heap
         });