diff --git a/.lock b/.lock
new file mode 100644
index 0000000..e69de29
diff --git a/crates.js b/crates.js
new file mode 100644
index 0000000..7a066fc
--- /dev/null
+++ b/crates.js
@@ -0,0 +1 @@
+window.ALL_CRATES = ["zksync_vm2","zksync_vm2_interface"];
\ No newline at end of file
+//! Addressing modes supported by EraVM.
+
+#[cfg(feature = "arbitrary")]
+use arbitrary::{Arbitrary, Unstructured};
+use enum_dispatch::enum_dispatch;
+use primitive_types::U256;
+use zkevm_opcode_defs::erase_fat_pointer_metadata;
+
+use crate::{mode_requirements::ModeRequirements, predication::Predicate};
+
+pub(crate) trait Source {
+ /// Get a word's value for non-pointer operations. (Pointers are erased.)
+ fn get(args: &Arguments, state: &mut impl Addressable) -> U256 {
+ Self::get_with_pointer_flag_and_erasing(args, state).0
+ }
+
+ /// Get a word's value and pointer flag.
+ fn get_with_pointer_flag(args: &Arguments, state: &mut impl Addressable) -> (U256, bool) {
+ (Self::get(args, state), false)
+ }
+
+ /// Get a word's value, erasing pointers but also returning the pointer flag.
+ /// The flag will always be false unless in kernel mode.
+ /// Necessary for pointer operations, which for some reason erase their second argument
+ /// but also panic when it is a pointer.
+ fn get_with_pointer_flag_and_erasing(
+ args: &Arguments,
+ state: &mut impl Addressable,
+ ) -> (U256, bool) {
+ let (mut value, is_pointer) = Self::get_with_pointer_flag(args, state);
+ if is_pointer && !state.in_kernel_mode() {
+ erase_fat_pointer_metadata(&mut value);
+ }
+ (value, is_pointer && state.in_kernel_mode())
+ }
+}
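+
+// Editor's illustration (not part of the original source): for a non-kernel
+// frame whose operand holds a fat pointer, `get()` yields the value with its
+// pointer metadata erased, `get_with_pointer_flag()` additionally reports the
+// raw flag, and `get_with_pointer_flag_and_erasing()` erases the metadata but
+// reports `false`, since the flag only survives in kernel mode.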
+
+pub(crate) trait Destination {
+ /// Sets this register/stack location to `value` and clears its pointer flag.
+ fn set(args: &Arguments, state: &mut impl Addressable, value: U256);
+
+ /// Same as `set`, but sets the pointer flag instead of clearing it.
+ fn set_fat_ptr(args: &Arguments, state: &mut impl Addressable, value: U256);
+}
+
+/// The part of the VM state that addressing modes need to operate on.
+pub(crate) trait Addressable {
+ fn registers(&mut self) -> &mut [U256; 16];
+ fn register_pointer_flags(&mut self) -> &mut u16;
+
+ fn read_stack(&mut self, slot: u16) -> U256;
+ fn write_stack(&mut self, slot: u16, value: U256);
+ fn stack_pointer(&mut self) -> &mut u16;
+
+ fn read_stack_pointer_flag(&mut self, slot: u16) -> bool;
+ fn set_stack_pointer_flag(&mut self, slot: u16);
+ fn clear_stack_pointer_flag(&mut self, slot: u16);
+
+ fn code_page(&self) -> &[U256];
+
+ fn in_kernel_mode(&self) -> bool;
+}
+
+#[enum_dispatch]
+pub(crate) trait SourceWriter {
+ fn write_source(&self, args: &mut Arguments);
+}
+
+impl<T: SourceWriter> SourceWriter for Option<T> {
+ fn write_source(&self, args: &mut Arguments) {
+ if let Some(x) = self {
+ x.write_source(args);
+ }
+ }
+}
+
+#[enum_dispatch]
+pub(crate) trait DestinationWriter {
+ fn write_destination(&self, args: &mut Arguments);
+}
+
+impl<T: DestinationWriter> DestinationWriter for Option<T> {
+ fn write_destination(&self, args: &mut Arguments) {
+ if let Some(x) = self {
+ x.write_destination(args);
+ }
+ }
+}
+
+/// Arguments provided to an instruction in an EraVM bytecode.
+// It is important for performance that this fits into 8 bytes.
+#[derive(Debug)]
+pub struct Arguments {
+ source_registers: PackedRegisters,
+ destination_registers: PackedRegisters,
+ immediate1: u16,
+ immediate2: u16,
+ predicate_and_mode_requirements: u8,
+ static_gas_cost: u8,
+}
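+
+// Compile-time sanity check (editor's sketch, not in the original source):
+// two one-byte `PackedRegisters`, two `u16` immediates, and two `u8` fields
+// pack into exactly 8 bytes, matching the performance note above.
+const _: () = assert!(std::mem::size_of::<Arguments>() == 8);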
+
+pub(crate) const L1_MESSAGE_COST: u32 = 156_250;
+pub(crate) const SSTORE_COST: u32 = 5_511;
+pub(crate) const SLOAD_COST: u32 = 2_008;
+pub(crate) const INVALID_INSTRUCTION_COST: u32 = 4_294_967_295;
+
+impl Arguments {
+ /// Creates arguments from the provided info.
+ #[allow(clippy::missing_panics_doc)] // never panics on properly created inputs
+ pub const fn new(
+ predicate: Predicate,
+ gas_cost: u32,
+ mode_requirements: ModeRequirements,
+ ) -> Self {
+ // Make sure that these two can be packed into 8 bits without overlapping
+ assert!(predicate as u8 & (0b11 << 6) == 0);
+ assert!(mode_requirements.0 & !0b11 == 0);
+
+ Self {
+ source_registers: PackedRegisters(0),
+ destination_registers: PackedRegisters(0),
+ immediate1: 0,
+ immediate2: 0,
+ predicate_and_mode_requirements: (predicate as u8) << 2 | mode_requirements.0,
+ static_gas_cost: Self::encode_static_gas_cost(gas_cost),
+ }
+ }
+
+ #[allow(clippy::cast_possible_truncation)] // checked
+ const fn encode_static_gas_cost(x: u32) -> u8 {
+ match x {
+ L1_MESSAGE_COST => 1,
+ SSTORE_COST => 2,
+ SLOAD_COST => 3,
+ INVALID_INSTRUCTION_COST => 4,
+ 1..=4 => panic!("Reserved gas cost values overlap with actual gas costs"),
+ x => {
+ if x > u8::MAX as u32 {
+ panic!("Gas cost doesn't fit into 8 bits");
+ } else {
+ x as u8
+ }
+ }
+ }
+ }
+
+ pub(crate) fn get_static_gas_cost(&self) -> u32 {
+ match self.static_gas_cost {
+ 1 => L1_MESSAGE_COST,
+ 2 => SSTORE_COST,
+ 3 => SLOAD_COST,
+ 4 => INVALID_INSTRUCTION_COST,
+ x => x.into(),
+ }
+ }
+
+ pub(crate) fn predicate(&self) -> Predicate {
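+ // SAFETY (editor's note): `new()` packs this byte as
+ // `(predicate as u8) << 2 | mode_requirements.0`, so shifting right by 2
+ // always recovers a valid `Predicate` discriminant.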
+ unsafe { std::mem::transmute(self.predicate_and_mode_requirements >> 2) }
+ }
+
+ pub(crate) fn mode_requirements(&self) -> ModeRequirements {
+ ModeRequirements(self.predicate_and_mode_requirements & 0b11)
+ }
+
+ pub(crate) fn write_source(mut self, sw: &impl SourceWriter) -> Self {
+ sw.write_source(&mut self);
+ self
+ }
+
+ pub(crate) fn write_destination(mut self, sw: &impl DestinationWriter) -> Self {
+ sw.write_destination(&mut self);
+ self
+ }
+}
+
+/// Register passed as a first instruction argument.
+///
+/// It must not be used simultaneously with [`AbsoluteStack`], [`RelativeStack`], [`AdvanceStackPointer`],
+/// or [`CodePage`].
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct Register1(pub Register);
+
+/// Register passed as a second instruction argument.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct Register2(pub Register);
+
+impl Source for Register1 {
+ fn get_with_pointer_flag(args: &Arguments, state: &mut impl Addressable) -> (U256, bool) {
+ let register = args.source_registers.register1();
+ (register.value(state), register.pointer_flag(state))
+ }
+}
+
+impl SourceWriter for Register1 {
+ fn write_source(&self, args: &mut Arguments) {
+ args.source_registers.set_register1(self.0);
+ }
+}
+
+impl Source for Register2 {
+ fn get_with_pointer_flag(args: &Arguments, state: &mut impl Addressable) -> (U256, bool) {
+ let register = args.source_registers.register2();
+ (register.value(state), register.pointer_flag(state))
+ }
+}
+
+impl SourceWriter for Register2 {
+ fn write_source(&self, args: &mut Arguments) {
+ args.source_registers.set_register2(self.0);
+ }
+}
+
+impl Destination for Register1 {
+ fn set(args: &Arguments, state: &mut impl Addressable, value: U256) {
+ args.destination_registers.register1().set(state, value);
+ }
+
+ fn set_fat_ptr(args: &Arguments, state: &mut impl Addressable, value: U256) {
+ args.destination_registers.register1().set_ptr(state, value);
+ }
+}
+
+impl DestinationWriter for Register1 {
+ fn write_destination(&self, args: &mut Arguments) {
+ args.destination_registers.set_register1(self.0);
+ }
+}
+
+impl Destination for Register2 {
+ fn set(args: &Arguments, state: &mut impl Addressable, value: U256) {
+ args.destination_registers.register2().set(state, value);
+ }
+
+ fn set_fat_ptr(args: &Arguments, state: &mut impl Addressable, value: U256) {
+ args.destination_registers.register2().set_ptr(state, value);
+ }
+}
+
+impl DestinationWriter for Register2 {
+ fn write_destination(&self, args: &mut Arguments) {
+ args.destination_registers.set_register2(self.0);
+ }
+}
+
+/// Immediate value passed as a first instruction arg.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct Immediate1(pub u16);
+
+/// Immediate value passed as a second instruction arg.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct Immediate2(pub u16);
+
+impl Immediate1 {
+ pub(crate) fn get_u16(args: &Arguments) -> u16 {
+ args.immediate1
+ }
+}
+
+impl Immediate2 {
+ pub(crate) fn get_u16(args: &Arguments) -> u16 {
+ args.immediate2
+ }
+}
+
+impl Source for Immediate1 {
+ fn get(args: &Arguments, _state: &mut impl Addressable) -> U256 {
+ U256([args.immediate1.into(), 0, 0, 0])
+ }
+}
+
+impl SourceWriter for Immediate1 {
+ fn write_source(&self, args: &mut Arguments) {
+ args.immediate1 = self.0;
+ }
+}
+
+impl Source for Immediate2 {
+ fn get(args: &Arguments, _state: &mut impl Addressable) -> U256 {
+ U256([args.immediate2.into(), 0, 0, 0])
+ }
+}
+
+impl SourceWriter for Immediate2 {
+ fn write_source(&self, args: &mut Arguments) {
+ args.immediate2 = self.0;
+ }
+}
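+
+// Editor's note: `U256` stores little-endian 64-bit limbs, so placing the
+// immediate in the first limb zero-extends it to a full 256-bit word.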
+
+/// Combination of a register and an immediate value wrapped by [`AbsoluteStack`], [`RelativeStack`],
+/// [`AdvanceStackPointer`] and [`CodePage`] addressing modes.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct RegisterAndImmediate {
+ /// Immediate value.
+ pub immediate: u16,
+ /// Register spec.
+ pub register: Register,
+}
+
+/// Any addressing mode that uses reg + imm in some way.
+/// They all encode their parameters in the same way.
+trait RegisterPlusImmediate {
+ fn inner(&self) -> &RegisterAndImmediate;
+}
+
+impl<T: RegisterPlusImmediate> SourceWriter for T {
+ fn write_source(&self, args: &mut Arguments) {
+ args.immediate1 = self.inner().immediate;
+ args.source_registers.set_register1(self.inner().register);
+ }
+}
+
+impl<T: RegisterPlusImmediate> DestinationWriter for T {
+ fn write_destination(&self, args: &mut Arguments) {
+ args.immediate2 = self.inner().immediate;
+ args.destination_registers
+ .set_register1(self.inner().register);
+ }
+}
+
+trait StackAddressing {
+ fn address_for_get(args: &Arguments, state: &mut impl Addressable) -> u16;
+ fn address_for_set(args: &Arguments, state: &mut impl Addressable) -> u16;
+}
+
+impl<T: StackAddressing> Source for T {
+ fn get_with_pointer_flag(args: &Arguments, state: &mut impl Addressable) -> (U256, bool) {
+ let address = Self::address_for_get(args, state);
+ (
+ state.read_stack(address),
+ state.read_stack_pointer_flag(address),
+ )
+ }
+}
+
+impl<T: StackAddressing> Destination for T {
+ fn set(args: &Arguments, state: &mut impl Addressable, value: U256) {
+ let address = Self::address_for_set(args, state);
+ state.write_stack(address, value);
+ state.clear_stack_pointer_flag(address);
+ }
+
+ fn set_fat_ptr(args: &Arguments, state: &mut impl Addressable, value: U256) {
+ let address = Self::address_for_set(args, state);
+ state.write_stack(address, value);
+ state.set_stack_pointer_flag(address);
+ }
+}
+
+fn source_stack_address(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ compute_stack_address(state, args.source_registers.register1(), args.immediate1)
+}
+
+pub(crate) fn destination_stack_address(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ compute_stack_address(
+ state,
+ args.destination_registers.register1(),
+ args.immediate2,
+ )
+}
+
+/// Computes register + immediate (mod 2^16).
+/// Stack addresses are always in that remainder class anyway.
+#[allow(clippy::cast_possible_truncation)]
+fn compute_stack_address(state: &mut impl Addressable, register: Register, immediate: u16) -> u16 {
+ (register.value(state).low_u32() as u16).wrapping_add(immediate)
+}
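+
+// Worked example (editor's): if the register holds 0x0001_0005, `low_u32()`
+// truncates to the `u16` value 5; with an immediate of 10 the resulting stack
+// address is 15, and overflow wraps around modulo 2^16.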
+
+/// Absolute addressing into stack.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct AbsoluteStack(pub RegisterAndImmediate);
+
+impl RegisterPlusImmediate for AbsoluteStack {
+ fn inner(&self) -> &RegisterAndImmediate {
+ &self.0
+ }
+}
+
+impl StackAddressing for AbsoluteStack {
+ fn address_for_get(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ source_stack_address(args, state)
+ }
+
+ fn address_for_set(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ destination_stack_address(args, state)
+ }
+}
+
+/// Relative addressing into stack (relative to the VM stack pointer).
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct RelativeStack(pub RegisterAndImmediate);
+
+impl RegisterPlusImmediate for RelativeStack {
+ fn inner(&self) -> &RegisterAndImmediate {
+ &self.0
+ }
+}
+
+impl StackAddressing for RelativeStack {
+ fn address_for_get(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ state
+ .stack_pointer()
+ .wrapping_sub(source_stack_address(args, state))
+ }
+
+ fn address_for_set(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ state
+ .stack_pointer()
+ .wrapping_sub(destination_stack_address(args, state))
+ }
+}
+
+/// Same as [`RelativeStack`], but moves the stack pointer on access (decreases it when reading data;
+/// increases when writing data).
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct AdvanceStackPointer(pub RegisterAndImmediate);
+
+impl RegisterPlusImmediate for AdvanceStackPointer {
+ fn inner(&self) -> &RegisterAndImmediate {
+ &self.0
+ }
+}
+
+impl StackAddressing for AdvanceStackPointer {
+ fn address_for_get(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ let offset = source_stack_address(args, state);
+ let sp = state.stack_pointer();
+ *sp = sp.wrapping_sub(offset);
+ *sp
+ }
+
+ fn address_for_set(args: &Arguments, state: &mut impl Addressable) -> u16 {
+ let offset = destination_stack_address(args, state);
+ let sp = state.stack_pointer();
+ let address_to_set = *sp;
+ *sp = sp.wrapping_add(offset);
+ address_to_set
+ }
+}
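+
+// In effect this is push/pop (editor's note): a read first moves the stack
+// pointer down by the computed offset and loads from the new location, while
+// a write stores at the current location and then moves the pointer up.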
+
+/// Absolute addressing into the code page of the currently executing program.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub struct CodePage(pub RegisterAndImmediate);
+
+impl RegisterPlusImmediate for CodePage {
+ fn inner(&self) -> &RegisterAndImmediate {
+ &self.0
+ }
+}
+
+impl Source for CodePage {
+ fn get(args: &Arguments, state: &mut impl Addressable) -> U256 {
+ let address = source_stack_address(args, state);
+ state
+ .code_page()
+ .get(address as usize)
+ .copied()
+ .unwrap_or(U256::zero())
+ }
+}
+
+/// Representation of one of 16 VM registers.
+#[derive(Debug, Clone, Copy)]
+pub struct Register(u8);
+
+impl Register {
+ /// Creates a register with the specified 0-based index.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `n >= 16`; EraVM has 16 registers.
+ pub const fn new(n: u8) -> Self {
+ assert!(n < 16, "EraVM has 16 registers");
+ Self(n)
+ }
+
+ fn value(self, state: &mut impl Addressable) -> U256 {
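+ // SAFETY (editor's note): register indices are always below 16, as checked
+ // in `Register::new()` and guaranteed by the 4-bit packing in `PackedRegisters`.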
+ unsafe { *state.registers().get_unchecked(self.0 as usize) }
+ }
+
+ fn pointer_flag(self, state: &mut impl Addressable) -> bool {
+ *state.register_pointer_flags() & (1 << self.0) != 0
+ }
+
+ fn set(self, state: &mut impl Addressable, value: U256) {
+ if self.0 != 0 {
+ unsafe { *state.registers().get_unchecked_mut(self.0 as usize) = value };
+ *state.register_pointer_flags() &= !(1 << self.0);
+ }
+ }
+
+ fn set_ptr(self, state: &mut impl Addressable, value: U256) {
+ if self.0 != 0 {
+ unsafe { *state.registers().get_unchecked_mut(self.0 as usize) = value };
+ *state.register_pointer_flags() |= 1 << self.0;
+ }
+ }
+}
+
+#[cfg(feature = "arbitrary")]
+impl<'a> Arbitrary<'a> for Register {
+ #[allow(clippy::cast_possible_truncation)] // false positive: the value is <16
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self, arbitrary::Error> {
+ Ok(Register(u.choose_index(16)? as u8))
+ }
+}
+
+#[derive(Hash, Debug)]
+struct PackedRegisters(u8);
+
+impl PackedRegisters {
+ fn register1(&self) -> Register {
+ Register(self.0 >> 4)
+ }
+ fn set_register1(&mut self, value: Register) {
+ self.0 &= 0xf;
+ self.0 |= value.0 << 4;
+ }
+ fn register2(&self) -> Register {
+ Register(self.0 & 0xf)
+ }
+ fn set_register2(&mut self, value: Register) {
+ self.0 &= 0xf0;
+ self.0 |= value.0;
+ }
+}
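+
+// Example (editor's): `PackedRegisters(0x2b)` encodes `register1()` = r2 from
+// the high nibble and `register2()` = r11 from the low nibble.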
+
+/// All supported addressing modes for the first source argument.
+#[enum_dispatch(SourceWriter)]
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub enum AnySource {
+ /// Register mode.
+ Register1,
+ /// Immediate mode.
+ Immediate1,
+ /// Absolute stack addressing.
+ AbsoluteStack,
+ /// Relative stack addressing.
+ RelativeStack,
+ /// Relative stack addressing that updates the stack pointer on access.
+ AdvanceStackPointer,
+ /// Addressing into the code page of the executing contract.
+ CodePage,
+}
+
+/// Register or immediate addressing modes required by some VM instructions.
+#[enum_dispatch(SourceWriter)]
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub enum RegisterOrImmediate {
+ /// Register mode.
+ Register1,
+ /// Immediate mode.
+ Immediate1,
+}
+
+/// Error converting [`AnySource`] to [`RegisterOrImmediate`].
+#[derive(Debug)]
+pub struct NotRegisterOrImmediate;
+
+impl TryFrom<AnySource> for RegisterOrImmediate {
+ type Error = NotRegisterOrImmediate;
+
+ fn try_from(value: AnySource) -> Result<Self, Self::Error> {
+ match value {
+ AnySource::Register1(r) => Ok(RegisterOrImmediate::Register1(r)),
+ AnySource::Immediate1(r) => Ok(RegisterOrImmediate::Immediate1(r)),
+ _ => Err(NotRegisterOrImmediate),
+ }
+ }
+}
+
+/// All supported addressing modes for the first destination argument.
+#[enum_dispatch(DestinationWriter)]
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
+pub enum AnyDestination {
+ /// Register mode.
+ Register1,
+ /// Absolute stack addressing.
+ AbsoluteStack,
+ /// Relative stack addressing.
+ RelativeStack,
+ /// Relative stack addressing that updates the stack pointer on access.
+ AdvanceStackPointer,
+}
+
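+// Usage sketch (editor's, not part of the original source; uses the
+// crate-private API above): encoding "register r1 as the source and stack
+// slot [r2 + 4] as the destination" for some hypothetical instruction:
+//
+// let args = Arguments::new(Predicate::Always, 6, ModeRequirements::none())
+//     .write_source(&AnySource::from(Register1(Register::new(1))))
+//     .write_destination(&AnyDestination::from(AbsoluteStack(
+//         RegisterAndImmediate { immediate: 4, register: Register::new(2) },
+//     )));
+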
+/// Bitset with `1 << 16` elements. Used to store pointer flags for VM [`Stack`](crate::stack::Stack).
+#[derive(Clone, PartialEq, Debug, Hash)]
+pub(crate) struct Bitset([u64; 1 << 10]);
+
+impl Bitset {
+ #[inline(always)]
+ pub(crate) fn get(&self, i: u16) -> bool {
+ let (slot, bit) = slot_and_bit(i);
+ self.0[slot] & bit != 0
+ }
+
+ #[inline(always)]
+ pub(crate) fn set(&mut self, i: u16) {
+ let (slot, bit) = slot_and_bit(i);
+ self.0[slot] |= bit;
+ }
+
+ #[inline(always)]
+ pub(crate) fn clear(&mut self, i: u16) {
+ let (slot, bit) = slot_and_bit(i);
+ self.0[slot] &= !bit;
+ }
+}
+
+#[inline(always)]
+fn slot_and_bit(i: u16) -> (usize, u64) {
+ ((i >> 6) as usize, 1u64 << (i & 0b_0011_1111))
+}
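+
+// Worked example (editor's): i = 130 gives slot 130 >> 6 = 2 and bit
+// 1 << (130 & 63) = 1 << 2; 1 << 10 words of 64 bits cover all 2^16 slots.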
+
+impl Default for Bitset {
+ fn default() -> Self {
+ Self([0; 1 << 10])
+ }
+}
+
+use std::{mem, ptr};
+
+use primitive_types::H160;
+use zkevm_opcode_defs::system_params::{NEW_FRAME_MEMORY_STIPEND, NEW_KERNEL_FRAME_MEMORY_STIPEND};
+use zksync_vm2_interface::{HeapId, Tracer};
+
+use crate::{
+ decommit::is_kernel,
+ instruction_handlers::invalid_instruction,
+ program::Program,
+ stack::{Stack, StackSnapshot},
+ world_diff::Snapshot,
+ Instruction,
+};
+
+#[derive(Debug)]
+pub(crate) struct Callframe<T, W> {
+ pub(crate) address: H160,
+ pub(crate) code_address: H160,
+ pub(crate) caller: H160,
+ pub(crate) exception_handler: u16,
+ pub(crate) context_u128: u128,
+ pub(crate) is_static: bool,
+ pub(crate) is_kernel: bool,
+ pub(crate) stack: Box<Stack>,
+ pub(crate) sp: u16,
+ pub(crate) gas: u32,
+ pub(crate) stipend: u32,
+ pub(crate) near_calls: Vec<NearCallFrame>,
+ pub(crate) pc: *const Instruction<T, W>,
+ pub(crate) program: Program<T, W>,
+ pub(crate) heap: HeapId,
+ pub(crate) aux_heap: HeapId,
+ /// The amount of heap that has been paid for. This should always be greater
+ /// than or equal to the actual size of the heap in memory.
+ pub(crate) heap_size: u32,
+ pub(crate) aux_heap_size: u32,
+ /// Returning a pointer to the calldata is illegal because it could result in
+ /// the caller's heap being accessible both directly and via the fat pointer.
+ /// The problem only occurs if the calldata originates from the caller's heap,
+ /// but banning all such returns is easier to implement.
+ pub(crate) calldata_heap: HeapId,
+ /// Because of the above rule we know that heaps returned to this frame only
+ /// exist to allow this frame to read from them. Therefore we can deallocate
+ /// all of them upon return, except possibly one that we pass on.
+ pub(crate) heaps_i_am_keeping_alive: Vec<HeapId>,
+ pub(crate) world_before_this_frame: Snapshot,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct NearCallFrame {
+ pub(crate) exception_handler: u16,
+ pub(crate) previous_frame_sp: u16,
+ pub(crate) previous_frame_gas: u32,
+ pub(crate) previous_frame_pc: u16,
+ world_before_this_frame: Snapshot,
+}
+
+impl<T, W> Callframe<T, W> {
+ #[allow(clippy::too_many_arguments)]
+ pub(crate) fn new(
+ address: H160,
+ code_address: H160,
+ caller: H160,
+ program: Program<T, W>,
+ stack: Box<Stack>,
+ heap: HeapId,
+ aux_heap: HeapId,
+ calldata_heap: HeapId,
+ gas: u32,
+ stipend: u32,
+ exception_handler: u16,
+ context_u128: u128,
+ is_static: bool,
+ world_before_this_frame: Snapshot,
+ ) -> Self {
+ let is_kernel = is_kernel(address);
+ let heap_size = if is_kernel {
+ NEW_KERNEL_FRAME_MEMORY_STIPEND
+ } else {
+ NEW_FRAME_MEMORY_STIPEND
+ };
+
+ Self {
+ address,
+ code_address,
+ caller,
+ pc: program.instruction(0).unwrap(),
+ program,
+ context_u128,
+ is_static,
+ is_kernel,
+ stack,
+ heap,
+ aux_heap,
+ heap_size,
+ aux_heap_size: heap_size,
+ calldata_heap,
+ heaps_i_am_keeping_alive: vec![],
+ sp: 0,
+ gas,
+ stipend,
+ exception_handler,
+ near_calls: vec![],
+ world_before_this_frame,
+ }
+ }
+}
+
+impl<T: Tracer, W> Callframe<T, W> {
+ pub(crate) fn push_near_call(
+ &mut self,
+ gas_to_call: u32,
+ exception_handler: u16,
+ world_before_this_frame: Snapshot,
+ ) {
+ self.near_calls.push(NearCallFrame {
+ exception_handler,
+ previous_frame_sp: self.sp,
+ previous_frame_gas: self.gas - gas_to_call,
+ previous_frame_pc: self.get_pc_as_u16(),
+ world_before_this_frame,
+ });
+ self.gas = gas_to_call;
+ }
+
+ pub(crate) fn pop_near_call(&mut self) -> Option<FrameRemnant> {
+ self.near_calls.pop().map(|f| {
+ self.sp = f.previous_frame_sp;
+ self.gas = f.previous_frame_gas;
+ self.set_pc_from_u16(f.previous_frame_pc);
+
+ FrameRemnant {
+ exception_handler: f.exception_handler,
+ snapshot: f.world_before_this_frame,
+ }
+ })
+ }
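+
+ // Gas accounting example (editor's): a frame holding 1000 gas that near-calls
+ // with `gas_to_call` = 300 stashes 700 as `previous_frame_gas` and continues
+ // with 300; `pop_near_call()` later restores the stashed 700 to `self.gas`.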
+
+ /// Gets a raw inferred program counter. This value can be garbage if the frame is on an invalid instruction or free panic.
+ #[allow(clippy::cast_possible_wrap)] // false positive: `Instruction` isn't that large
+ pub(crate) fn get_raw_pc(&self) -> isize {
+ // We cannot use `<*const _>::offset_from` because `self.pc` isn't guaranteed to be allocated within `self.program`
+ // (invalid instructions and free panics aren't).
+ let offset_in_bytes =
+ self.pc as isize - ptr::from_ref(self.program.instruction(0).unwrap()) as isize;
+ offset_in_bytes / mem::size_of::<Instruction<T, W>>() as isize
+ }
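+
+ // Example (editor's): if `Instruction<T, W>` occupied 32 bytes, a `pc` 96
+ // bytes past instruction 0 would yield a raw program counter of 3; a `pc`
+ // pointing outside the program (invalid instruction, free panic) may yield
+ // a negative or otherwise garbage value, as noted above.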
+
+ // TODO: can overflow / underflow after an invalid instruction or free panic. Ordinarily, this will lead to VM termination (for an invalid instruction)
+ // or the callframe getting popped (for a panic), but it's still technically possible to invoke this method afterwards in certain cases (e.g., in the bootloader callframe).
+ #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
+ pub(crate) fn get_pc_as_u16(&self) -> u16 {
+ self.get_raw_pc() as u16
+ }
+
+ /// Sets the next instruction to execute to the instruction at the given index.
+ /// If the index is out of bounds, the invalid instruction is used.
+ pub(crate) fn set_pc_from_u16(&mut self, index: u16) {
+ self.pc = self
+ .program
+ .instruction(index)
+ .unwrap_or_else(invalid_instruction);
+ }
+
+ /// The total amount of gas in this frame, including gas currently inaccessible because of a near call.
+ pub(crate) fn contained_gas(&self) -> u32 {
+ self.gas
+ + self
+ .near_calls
+ .iter()
+ .map(|f| f.previous_frame_gas)
+ .sum::<u32>()
+ }
+
+ pub(crate) fn snapshot(&self) -> CallframeSnapshot {
+ CallframeSnapshot {
+ stack: self.stack.snapshot(),
+
+ context_u128: self.context_u128,
+ sp: self.sp,
+ pc: self.get_pc_as_u16(),
+ gas: self.gas,
+ near_calls: self.near_calls.clone(),
+ heap_size: self.heap_size,
+ aux_heap_size: self.aux_heap_size,
+ heaps_i_was_keeping_alive: self.heaps_i_am_keeping_alive.len(),
+ }
+ }
+
+ /// Returns heaps that were created during the rolled-back period; they can
+ /// no longer be referenced and should be deallocated.
+ pub(crate) fn rollback(
+ &mut self,
+ snapshot: CallframeSnapshot,
+ ) -> impl Iterator<Item = HeapId> + '_ {
+ let CallframeSnapshot {
+ stack,
+ context_u128,
+ sp,
+ pc,
+ gas,
+ near_calls,
+ heap_size,
+ aux_heap_size,
+ heaps_i_was_keeping_alive,
+ } = snapshot;
+
+ self.stack.rollback(stack);
+
+ self.context_u128 = context_u128;
+ self.sp = sp;
+ self.set_pc_from_u16(pc);
+ self.gas = gas;
+ self.near_calls = near_calls;
+ self.heap_size = heap_size;
+ self.aux_heap_size = aux_heap_size;
+
+ self.heaps_i_am_keeping_alive
+ .drain(heaps_i_was_keeping_alive..)
+ }
+}
+
+pub(crate) struct FrameRemnant {
+ pub(crate) exception_handler: u16,
+ pub(crate) snapshot: Snapshot,
+}
+
+/// Only contains the fields that can change (other than via tracer).
+#[derive(Debug)]
+pub(crate) struct CallframeSnapshot {
+ stack: StackSnapshot,
+ context_u128: u128,
+ sp: u16,
+ pc: u16,
+ gas: u32,
+ near_calls: Vec<NearCallFrame>,
+ heap_size: u32,
+ aux_heap_size: u32,
+ heaps_i_was_keeping_alive: usize,
+}
+
+impl<T, W> Clone for Callframe<T, W> {
+ fn clone(&self) -> Self {
+ Self {
+ address: self.address,
+ code_address: self.code_address,
+ caller: self.caller,
+ exception_handler: self.exception_handler,
+ context_u128: self.context_u128,
+ is_static: self.is_static,
+ is_kernel: self.is_kernel,
+ stack: self.stack.clone(),
+ sp: self.sp,
+ gas: self.gas,
+ stipend: self.stipend,
+ near_calls: self.near_calls.clone(),
+ pc: self.pc,
+ program: self.program.clone(),
+ heap: self.heap,
+ aux_heap: self.aux_heap,
+ heap_size: self.heap_size,
+ aux_heap_size: self.aux_heap_size,
+ calldata_heap: self.calldata_heap,
+ heaps_i_am_keeping_alive: self.heaps_i_am_keeping_alive.clone(),
+ world_before_this_frame: self.world_before_this_frame.clone(),
+ }
+ }
+}
+
+impl<T, W> PartialEq for Callframe<T, W> {
+ fn eq(&self, other: &Self) -> bool {
+ self.address == other.address
+ && self.code_address == other.code_address
+ && self.caller == other.caller
+ && self.exception_handler == other.exception_handler
+ && self.context_u128 == other.context_u128
+ && self.is_static == other.is_static
+ && self.stack == other.stack
+ && self.sp == other.sp
+ && self.gas == other.gas
+ && self.stipend == other.stipend
+ && self.near_calls == other.near_calls
+ && self.pc == other.pc
+ && self.program == other.program
+ && self.heap == other.heap
+ && self.aux_heap == other.aux_heap
+ && self.heap_size == other.heap_size
+ && self.aux_heap_size == other.aux_heap_size
+ && self.calldata_heap == other.calldata_heap
+ && self.heaps_i_am_keeping_alive == other.heaps_i_am_keeping_alive
+ && self.world_before_this_frame == other.world_before_this_frame
+ }
+}
+
+use zkevm_opcode_defs::{
+ decoding::{EncodingModeProduction, VmEncodingMode},
+ ImmMemHandlerFlags, Opcode,
+ Operand::{Full, RegOnly, RegOrImm},
+ RegOrImmFlags, FAR_CALL_SHARD_FLAG_IDX, FAR_CALL_STATIC_FLAG_IDX, FIRST_MESSAGE_FLAG_IDX,
+ RET_TO_LABEL_BIT_IDX, SET_FLAGS_FLAG_IDX, SWAP_OPERANDS_FLAG_IDX_FOR_ARITH_OPCODES,
+ SWAP_OPERANDS_FLAG_IDX_FOR_PTR_OPCODE, UMA_INCREMENT_FLAG_IDX,
+};
+use zksync_vm2_interface::{
+ opcodes::{
+ self, Add, And, Div, Mul, Or, PointerAdd, PointerPack, PointerShrink, PointerSub,
+ RotateLeft, RotateRight, ShiftLeft, ShiftRight, Sub, Xor,
+ },
+ Tracer,
+};
+
+use crate::{
+ addressing_modes::{
+ AbsoluteStack, AdvanceStackPointer, AnyDestination, AnySource, Arguments, CodePage,
+ Immediate1, Immediate2, Register, Register1, Register2, RegisterAndImmediate,
+ RelativeStack, SourceWriter,
+ },
+ instruction::{ExecutionEnd, ExecutionStatus},
+ mode_requirements::ModeRequirements,
+ Instruction, Predicate, VirtualMachine, World,
+};
+
+fn unimplemented_instruction<T, W>(variant: Opcode) -> Instruction<T, W> {
+ let mut arguments = Arguments::new(Predicate::Always, 0, ModeRequirements::none());
+ let variant_as_number: u16 = unsafe { std::mem::transmute(variant) };
+ Immediate1(variant_as_number).write_source(&mut arguments);
+ Instruction {
+ handler: unimplemented_handler,
+ arguments,
+ }
+}
+
+fn unimplemented_handler<T, W>(
+ vm: &mut VirtualMachine<T, W>,
+ _: &mut W,
+ _: &mut T,
+) -> ExecutionStatus {
+ let variant: Opcode = unsafe {
+ std::mem::transmute(Immediate1::get_u16(&(*vm.state.current_frame.pc).arguments))
+ };
+ eprintln!("Unimplemented instruction: {variant:?}");
+ ExecutionStatus::Stopped(ExecutionEnd::Panicked)
+}
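+
+// Editor's note: the opcode variant is round-tripped through the otherwise
+// unused `Immediate1` slot as a `u16`, letting the panic handler report which
+// instruction was unimplemented without any extra storage in `Instruction`.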
+
+#[allow(clippy::too_many_lines)]
+pub(crate) fn decode<T: Tracer, W: World<T>>(raw: u64, is_bootloader: bool) -> Instruction<T, W> {
+ let (parsed, _) = EncodingModeProduction::parse_preliminary_variant_and_absolute_number(raw);
+
+ let predicate = match parsed.condition {
+ zkevm_opcode_defs::Condition::Always => Predicate::Always,
+ zkevm_opcode_defs::Condition::Gt => Predicate::IfGT,
+ zkevm_opcode_defs::Condition::Lt => Predicate::IfLT,
+ zkevm_opcode_defs::Condition::Eq => Predicate::IfEQ,
+ zkevm_opcode_defs::Condition::Ge => Predicate::IfGE,
+ zkevm_opcode_defs::Condition::Le => Predicate::IfLE,
+ zkevm_opcode_defs::Condition::Ne => Predicate::IfNotEQ,
+ zkevm_opcode_defs::Condition::GtOrLt => Predicate::IfGTOrLT,
+ };
+ let arguments = Arguments::new(
+ predicate,
+ parsed.variant.ergs_price(),
+ ModeRequirements::new(
+ parsed.variant.requires_kernel_mode(),
+ !parsed.variant.can_be_used_in_static_context(),
+ ),
+ );
+
+ let stack_in = RegisterAndImmediate {
+ immediate: parsed.imm_0,
+ register: Register::new(parsed.src0_reg_idx),
+ };
+ let src1: AnySource = match parsed.variant.src0_operand_type {
+ RegOnly | RegOrImm(RegOrImmFlags::UseRegOnly) | Full(ImmMemHandlerFlags::UseRegOnly) => {
+ Register1(Register::new(parsed.src0_reg_idx)).into()
+ }
+ RegOrImm(RegOrImmFlags::UseImm16Only) | Full(ImmMemHandlerFlags::UseImm16Only) => {
+ Immediate1(parsed.imm_0).into()
+ }
+ Full(ImmMemHandlerFlags::UseAbsoluteOnStack) => AbsoluteStack(stack_in).into(),
+ Full(ImmMemHandlerFlags::UseStackWithPushPop) => AdvanceStackPointer(stack_in).into(),
+ Full(ImmMemHandlerFlags::UseStackWithOffset) => RelativeStack(stack_in).into(),
+ Full(ImmMemHandlerFlags::UseCodePage) => CodePage(stack_in).into(),
+ };
+
+ let stack_out = RegisterAndImmediate {
+ immediate: parsed.imm_1,
+ register: Register::new(parsed.dst0_reg_idx),
+ };
+ let out: AnyDestination = match parsed.variant.dst0_operand_type {
+ RegOnly | RegOrImm(RegOrImmFlags::UseRegOnly) | Full(ImmMemHandlerFlags::UseRegOnly) => {
+ Register1(Register::new(parsed.dst0_reg_idx)).into()
+ }
+ RegOrImm(RegOrImmFlags::UseImm16Only) | Full(ImmMemHandlerFlags::UseImm16Only) => {
+ panic!("Parser wants to output to immediate")
+ }
+ Full(ImmMemHandlerFlags::UseAbsoluteOnStack) => AbsoluteStack(stack_out).into(),
+ Full(ImmMemHandlerFlags::UseStackWithPushPop) => AdvanceStackPointer(stack_out).into(),
+ Full(ImmMemHandlerFlags::UseStackWithOffset) => RelativeStack(stack_out).into(),
+ Full(ImmMemHandlerFlags::UseCodePage) => panic!("Parser wants to write to code page"),
+ };
+
+ let src2 = Register2(Register::new(parsed.src1_reg_idx));
+ let out2 = Register2(Register::new(parsed.dst1_reg_idx));
+
+ macro_rules! binop {
+ ($op: ident, $snd: tt) => {
+ Instruction::from_binop::<$op>(
+ src1,
+ src2,
+ out,
+ &$snd,
+ arguments,
+ parsed.variant.flags[SWAP_OPERANDS_FLAG_IDX_FOR_ARITH_OPCODES],
+ parsed.variant.flags[SET_FLAGS_FLAG_IDX],
+ )
+ };
+ }
+
+ macro_rules! ptr {
+ ($op: ident) => {
+ Instruction::from_ptr::<$op>(
+ src1,
+ src2,
+ out,
+ arguments,
+ parsed.variant.flags[SWAP_OPERANDS_FLAG_IDX_FOR_PTR_OPCODE],
+ )
+ };
+ }
+
+ match parsed.variant.opcode {
+ Opcode::Add(_) => binop!(Add, ()),
+ Opcode::Sub(_) => binop!(Sub, ()),
+ Opcode::Mul(_) => binop!(Mul, out2),
+ Opcode::Div(_) => binop!(Div, out2),
+ Opcode::Binop(x) => match x {
+ zkevm_opcode_defs::BinopOpcode::Xor => binop!(Xor, ()),
+ zkevm_opcode_defs::BinopOpcode::And => binop!(And, ()),
+ zkevm_opcode_defs::BinopOpcode::Or => binop!(Or, ()),
+ },
+ Opcode::Shift(x) => match x {
+ zkevm_opcode_defs::ShiftOpcode::Shl => binop!(ShiftLeft, ()),
+ zkevm_opcode_defs::ShiftOpcode::Shr => binop!(ShiftRight, ()),
+ zkevm_opcode_defs::ShiftOpcode::Rol => binop!(RotateLeft, ()),
+ zkevm_opcode_defs::ShiftOpcode::Ror => binop!(RotateRight, ()),
+ },
+ Opcode::Jump(_) => Instruction::from_jump(src1, out.try_into().unwrap(), arguments),
+ Opcode::Context(x) => match x {
+ zkevm_opcode_defs::ContextOpcode::This => {
+ Instruction::from_this(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::Caller => {
+ Instruction::from_caller(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::CodeAddress => {
+ Instruction::from_code_address(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::ErgsLeft => {
+ Instruction::from_ergs_left(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::GetContextU128 => {
+ Instruction::from_context_u128(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::SetContextU128 => {
+ Instruction::from_set_context_u128(src1.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::Sp => {
+ Instruction::from_context_sp(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::Meta => {
+ Instruction::from_context_meta(out.try_into().unwrap(), arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::IncrementTxNumber => {
+ Instruction::from_increment_tx_number(arguments)
+ }
+ zkevm_opcode_defs::ContextOpcode::AuxMutating0 => {
+ Instruction::from_aux_mutating(arguments)
+ }
+ },
+ Opcode::Ptr(x) => match x {
+ zkevm_opcode_defs::PtrOpcode::Add => ptr!(PointerAdd),
+ zkevm_opcode_defs::PtrOpcode::Sub => ptr!(PointerSub),
+ zkevm_opcode_defs::PtrOpcode::Pack => ptr!(PointerPack),
+ zkevm_opcode_defs::PtrOpcode::Shrink => ptr!(PointerShrink),
+ },
+ Opcode::NearCall(_) => Instruction::from_near_call(
+ Register1(Register::new(parsed.src0_reg_idx)),
+ Immediate1(parsed.imm_0),
+ Immediate2(parsed.imm_1),
+ arguments,
+ ),
+ Opcode::FarCall(kind) => {
+ let constructor = match kind {
+ zkevm_opcode_defs::FarCallOpcode::Normal => {
+ Instruction::from_far_call::<opcodes::Normal>
+ }
+ zkevm_opcode_defs::FarCallOpcode::Delegate => {
+ Instruction::from_far_call::<opcodes::Delegate>
+ }
+ zkevm_opcode_defs::FarCallOpcode::Mimic => {
+ Instruction::from_far_call::<opcodes::Mimic>
+ }
+ };
+ constructor(
+ src1.try_into().unwrap(),
+ src2,
+ Immediate1(parsed.imm_0),
+ parsed.variant.flags[FAR_CALL_STATIC_FLAG_IDX],
+ parsed.variant.flags[FAR_CALL_SHARD_FLAG_IDX],
+ arguments,
+ )
+ }
+ Opcode::Ret(kind) => {
+ let to_label = parsed.variant.flags[RET_TO_LABEL_BIT_IDX];
+ let label = if to_label {
+ Some(Immediate1(parsed.imm_0))
+ } else {
+ None
+ };
+ match kind {
+ zkevm_opcode_defs::RetOpcode::Ok => {
+ Instruction::from_ret(src1.try_into().unwrap(), label, arguments)
+ }
+ zkevm_opcode_defs::RetOpcode::Revert => {
+ Instruction::from_revert(src1.try_into().unwrap(), label, arguments)
+ }
+ zkevm_opcode_defs::RetOpcode::Panic => Instruction::from_panic(label, arguments),
+ }
+ }
+ Opcode::Log(x) => match x {
+ zkevm_opcode_defs::LogOpcode::StorageRead => Instruction::from_storage_read(
+ src1.try_into().unwrap(),
+ out.try_into().unwrap(),
+ arguments,
+ ),
+ zkevm_opcode_defs::LogOpcode::TransientStorageRead => {
+ Instruction::from_transient_storage_read(
+ src1.try_into().unwrap(),
+ out.try_into().unwrap(),
+ arguments,
+ )
+ }
+
+ zkevm_opcode_defs::LogOpcode::StorageWrite => {
+ Instruction::from_storage_write(src1.try_into().unwrap(), src2, arguments)
+ }
+
+ zkevm_opcode_defs::LogOpcode::TransientStorageWrite => {
+ Instruction::from_transient_storage_write(src1.try_into().unwrap(), src2, arguments)
+ }
+
+ zkevm_opcode_defs::LogOpcode::ToL1Message => Instruction::from_l2_to_l1_message(
+ src1.try_into().unwrap(),
+ src2,
+ parsed.variant.flags[FIRST_MESSAGE_FLAG_IDX],
+ arguments,
+ ),
+ zkevm_opcode_defs::LogOpcode::Event => Instruction::from_event(
+ src1.try_into().unwrap(),
+ src2,
+ parsed.variant.flags[FIRST_MESSAGE_FLAG_IDX],
+ arguments,
+ ),
+ zkevm_opcode_defs::LogOpcode::PrecompileCall => Instruction::from_precompile_call(
+ src1.try_into().unwrap(),
+ src2,
+ out.try_into().unwrap(),
+ arguments,
+ ),
+ zkevm_opcode_defs::LogOpcode::Decommit => Instruction::from_decommit(
+ src1.try_into().unwrap(),
+ src2,
+ out.try_into().unwrap(),
+ arguments,
+ ),
+ },
+ Opcode::UMA(x) => {
+ let increment = parsed.variant.flags[UMA_INCREMENT_FLAG_IDX];
+ match x {
+ zkevm_opcode_defs::UMAOpcode::HeapRead => Instruction::from_heap_read(
+ src1.try_into().unwrap(),
+ out.try_into().unwrap(),
+ increment.then_some(out2),
+ arguments,
+ ),
+ zkevm_opcode_defs::UMAOpcode::HeapWrite => Instruction::from_heap_write(
+ src1.try_into().unwrap(),
+ src2,
+ increment.then_some(out.try_into().unwrap()),
+ arguments,
+ is_bootloader,
+ ),
+ zkevm_opcode_defs::UMAOpcode::AuxHeapRead => Instruction::from_aux_heap_read(
+ src1.try_into().unwrap(),
+ out.try_into().unwrap(),
+ increment.then_some(out2),
+ arguments,
+ ),
+ zkevm_opcode_defs::UMAOpcode::AuxHeapWrite => Instruction::from_aux_heap_store(
+ src1.try_into().unwrap(),
+ src2,
+ increment.then_some(out.try_into().unwrap()),
+ arguments,
+ ),
+ zkevm_opcode_defs::UMAOpcode::FatPointerRead => Instruction::from_pointer_read(
+ src1.try_into().unwrap(),
+ out.try_into().unwrap(),
+ increment.then_some(out2),
+ arguments,
+ ),
+ zkevm_opcode_defs::UMAOpcode::StaticMemoryRead => unimplemented_instruction(
+ Opcode::UMA(zkevm_opcode_defs::UMAOpcode::StaticMemoryRead),
+ ),
+ zkevm_opcode_defs::UMAOpcode::StaticMemoryWrite => unimplemented_instruction(
+ Opcode::UMA(zkevm_opcode_defs::UMAOpcode::StaticMemoryWrite),
+ ),
+ }
+ }
+ Opcode::Invalid(_) => Instruction::from_invalid(),
+ Opcode::Nop(_) => {
+ let no_sp_movement = AdvanceStackPointer(RegisterAndImmediate {
+ immediate: 0,
+ register: Register::new(0),
+ });
+ Instruction::from_nop(
+ if let AnySource::AdvanceStackPointer(pop) = src1 {
+ pop
+ } else {
+ no_sp_movement
+ },
+ if let AnyDestination::AdvanceStackPointer(push) = out {
+ push
+ } else {
+ no_sp_movement
+ },
+ arguments,
+ )
+ }
+ }
+}
+
+use primitive_types::{H160, U256};
+use zkevm_opcode_defs::{
+ ethereum_types::Address, system_params::DEPLOYER_SYSTEM_CONTRACT_ADDRESS_LOW,
+};
+use zksync_vm2_interface::{CycleStats, Tracer};
+
+use crate::{program::Program, world_diff::WorldDiff, World};
+
+impl WorldDiff {
+ pub(crate) fn decommit<T: Tracer>(
+ &mut self,
+ world: &mut impl World<T>,
+ tracer: &mut T,
+ address: U256,
+ default_aa_code_hash: [u8; 32],
+ evm_interpreter_code_hash: [u8; 32],
+ is_constructor_call: bool,
+ ) -> Option<(UnpaidDecommit, bool)> {
+ let deployer_system_contract_address =
+ Address::from_low_u64_be(DEPLOYER_SYSTEM_CONTRACT_ADDRESS_LOW.into());
+
+ let mut is_evm = false;
+
+ let mut code_info = {
+ let code_info = self.read_storage_without_refund(
+ world,
+ tracer,
+ deployer_system_contract_address,
+ address,
+ );
+ let mut code_info_bytes = [0; 32];
+ code_info.to_big_endian(&mut code_info_bytes);
+
+ // Note that EOAs are considered constructed because their code info is all zeroes.
+ let is_constructed = match code_info_bytes[1] {
+ 0 => true,
+ 1 => false,
+ _ => {
+ return None;
+ }
+ };
+
+ let try_default_aa = if is_kernel(u256_into_address(address)) {
+ None
+ } else {
+ Some(default_aa_code_hash)
+ };
+
+            // The address aliasing contract implements the Ethereum-like behavior of calls
+            // to EOAs returning successfully (and address aliasing when called from the
+            // bootloader). It makes sense that unconstructed code is treated as an EOA, but
+            // for some reason a constructor call to constructed code is also treated as an EOA.
+ match code_info_bytes[0] {
+ 1 => {
+ if is_constructed == is_constructor_call {
+ try_default_aa?
+ } else {
+ code_info_bytes
+ }
+ }
+ 2 => {
+ if is_constructed == is_constructor_call {
+ try_default_aa?
+ } else {
+ is_evm = true;
+ evm_interpreter_code_hash
+ }
+ }
+ _ if code_info == U256::zero() => try_default_aa?,
+ _ => return None,
+ }
+ };
+
+ code_info[1] = 0;
+ let code_key: U256 = U256::from_big_endian(&code_info);
+
+ let was_decommitted = self.decommitted_hashes.as_ref().get(&code_key) == Some(&true);
+ let cost = if was_decommitted {
+ 0
+ } else {
+ let code_length_in_words = u16::from_be_bytes([code_info[2], code_info[3]]);
+ u32::from(code_length_in_words) * zkevm_opcode_defs::ERGS_PER_CODE_WORD_DECOMMITTMENT
+ };
+
+ Some((UnpaidDecommit { cost, code_key }, is_evm))
+ }
+
+ /// Returns the decommitted contract code and a flag set to `true` if this is a fresh decommit (i.e.,
+ /// the code wasn't decommitted previously in the same VM run).
+ #[doc(hidden)] // should be used for testing purposes only; can break VM operation otherwise
+ pub fn decommit_opcode<T: Tracer>(
+ &mut self,
+ world: &mut impl World<T>,
+ tracer: &mut T,
+ code_hash: U256,
+ ) -> (Vec<u8>, bool) {
+ let is_new = self.decommitted_hashes.insert(code_hash, true) != Some(true);
+ let code = world.decommit_code(code_hash);
+ if is_new {
+ let code_len = u32::try_from(code.len()).expect("bytecode length overflow");
+ // Decommitter can process two words per cycle, hence division by 2 * 32 = 64.
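+            // E.g., a 100-byte bytecode costs `100_u32.div_ceil(64)` = 2 decommitter cycles.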
+ tracer.on_extra_prover_cycles(CycleStats::Decommit(code_len.div_ceil(64)));
+ }
+ (code, is_new)
+ }
+
+ pub(crate) fn pay_for_decommit<T: Tracer, W: World<T>>(
+ &mut self,
+ world: &mut W,
+ tracer: &mut T,
+ decommit: UnpaidDecommit,
+ gas: &mut u32,
+ ) -> Option<Program<T, W>> {
+ if decommit.cost > *gas {
+ // We intentionally record a decommitment event even if actual decommitment never happens because of an out-of-gas error.
+ // This is how the old VM behaves.
+ self.decommitted_hashes.insert(decommit.code_key, false);
+ // Unlike all other gas costs, this one is not paid if low on gas.
+ return None;
+ }
+
+ let is_new = self.decommitted_hashes.insert(decommit.code_key, true) != Some(true);
+ *gas -= decommit.cost;
+
+ let decommit = world.decommit(decommit.code_key);
+ if is_new {
+ let code_len_in_words =
+ u32::try_from(decommit.code_page().len()).expect("bytecode length overflow");
+ // Decommitter can process two words per cycle.
+ tracer.on_extra_prover_cycles(CycleStats::Decommit(code_len_in_words.div_ceil(2)));
+ }
+
+ Some(decommit)
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) struct UnpaidDecommit {
+ cost: u32,
+ code_key: U256,
+}
+
+pub(crate) fn u256_into_address(source: U256) -> H160 {
+ let mut result = H160::zero();
+ let mut bytes = [0; 32];
+ source.to_big_endian(&mut bytes);
+ result.assign_from_slice(&bytes[12..]);
+ result
+}
+
+pub(crate) fn is_kernel(address: H160) -> bool {
+ address.0[..18].iter().all(|&byte| byte == 0)
+}
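+
+// A minimal illustrative sketch of the two helpers above (added example, not original code):
+// kernel space is exactly the addresses below 2^16, and `u256_into_address` keeps the
+// low 20 bytes of a 32-byte word.
+#[cfg(test)]
+mod address_helper_tests {
+    use super::*;
+
+    #[test]
+    fn kernel_address_boundary() {
+        // 0xffff is still a kernel address; 0x1_0000 is the first user-space address.
+        assert!(is_kernel(u256_into_address(U256::from(0xffff_u64))));
+        assert!(!is_kernel(u256_into_address(U256::from(0x1_0000_u64))));
+    }
+}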
+
+use std::ptr;
+
+use primitive_types::U256;
+use zksync_vm2_interface::HeapId;
+
+/// Fat pointer to a heap location.
+#[derive(Debug)]
+#[repr(C)]
+pub struct FatPointer {
+ /// Additional pointer offset inside the `start..(start + length)` range.
+ pub offset: u32,
+ /// ID of the heap this points to.
+ pub memory_page: HeapId,
+ /// 0-based index of the pointer start byte at the `memory` page.
+ pub start: u32,
+ /// Length of the pointed slice in bytes.
+ pub length: u32,
+}
+
+#[cfg(target_endian = "little")]
+impl From<&mut U256> for &mut FatPointer {
+ fn from(value: &mut U256) -> Self {
+ unsafe { &mut *ptr::from_mut(value).cast() }
+ }
+}
+
+#[cfg(target_endian = "little")]
+impl From<U256> for FatPointer {
+ fn from(value: U256) -> Self {
+ unsafe { std::mem::transmute(value.low_u128()) }
+ }
+}
+
+impl FatPointer {
+ /// Converts this pointer into a `U256` word.
+ #[cfg(target_endian = "little")]
+ pub fn into_u256(self) -> U256 {
+ U256::zero() + unsafe { std::mem::transmute::<FatPointer, u128>(self) }
+ }
+}
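+
+// A minimal sketch illustrating the `#[repr(C)]` layout relied on above (added example,
+// not original code): on little-endian targets, `offset` occupies the lowest 32 bits of
+// the packed word, followed by `memory_page`, `start` and `length`.
+#[cfg(test)]
+#[cfg(target_endian = "little")]
+mod layout_tests {
+    use super::*;
+
+    #[test]
+    fn fat_pointer_round_trips_through_u256() {
+        let pointer = FatPointer {
+            offset: 1,
+            memory_page: HeapId::from_u32_unchecked(2),
+            start: 3,
+            length: 4,
+        };
+        let word = pointer.into_u256();
+        assert_eq!(
+            word,
+            U256::from(1_u128 | (2_u128 << 32) | (3_u128 << 64) | (4_u128 << 96))
+        );
+
+        let restored = FatPointer::from(word);
+        assert_eq!((restored.offset, restored.start, restored.length), (1, 3, 4));
+    }
+}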
+
+use std::{
+ fmt, mem,
+ ops::{Index, Range},
+};
+
+use primitive_types::U256;
+use zksync_vm2_interface::HeapId;
+
+/// Heap page size in bytes.
+const HEAP_PAGE_SIZE: usize = 1 << 12;
+
+/// Heap page.
+#[derive(Debug, Clone, PartialEq)]
+struct HeapPage(Box<[u8; HEAP_PAGE_SIZE]>);
+
+impl Default for HeapPage {
+ fn default() -> Self {
+ let boxed_slice: Box<[u8]> = vec![0_u8; HEAP_PAGE_SIZE].into();
+ Self(boxed_slice.try_into().unwrap())
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+pub(crate) struct Heap {
+ pages: Vec<Option<HeapPage>>,
+}
+
+// Since we never remove `HeapPage`s (even after rollbacks – although we do zero all added
+// pages in this case), we allow additional pages to be present if they are zeroed.
+impl PartialEq for Heap {
+ fn eq(&self, other: &Self) -> bool {
+ for i in 0..self.pages.len().max(other.pages.len()) {
+ let this_page = self.pages.get(i).and_then(Option::as_ref);
+ let other_page = other.pages.get(i).and_then(Option::as_ref);
+ match (this_page, other_page) {
+ (Some(this_page), Some(other_page)) => {
+ if this_page != other_page {
+ return false;
+ }
+ }
+ (Some(page), None) | (None, Some(page)) => {
+ if page.0.iter().any(|&byte| byte != 0) {
+ return false;
+ }
+ }
+ (None, None) => { /* do nothing */ }
+ }
+ }
+ true
+ }
+}
+
+impl Heap {
+ fn from_bytes(bytes: &[u8], pagepool: &mut PagePool) -> Self {
+ let pages = bytes
+ .chunks(HEAP_PAGE_SIZE)
+ .map(|bytes| {
+ Some(if let Some(mut page) = pagepool.get_dirty_page() {
+ page.0[..bytes.len()].copy_from_slice(bytes);
+ page.0[bytes.len()..].fill(0);
+ page
+ } else {
+ let mut page = HeapPage::default();
+ page.0[..bytes.len()].copy_from_slice(bytes);
+ page
+ })
+ })
+ .collect();
+ Self { pages }
+ }
+
+ pub(crate) fn read_u256(&self, start_address: u32) -> U256 {
+ let (page_idx, offset_in_page) = address_to_page_offset(start_address);
+ let bytes_in_page = HEAP_PAGE_SIZE - offset_in_page;
+
+ if bytes_in_page >= 32 {
+ if let Some(page) = self.page(page_idx) {
+ U256::from_big_endian(&page.0[offset_in_page..offset_in_page + 32])
+ } else {
+ U256::zero()
+ }
+ } else {
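+            // The read straddles a page boundary: copy the tail of the current page into
+            // the result and the remainder from the start of the next page; missing pages
+            // read as zeros.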
+ let mut result = [0u8; 32];
+ if let Some(page) = self.page(page_idx) {
+ for (res, src) in result.iter_mut().zip(&page.0[offset_in_page..]) {
+ *res = *src;
+ }
+ }
+ if let Some(page) = self.page(page_idx + 1) {
+ for (res, src) in result[bytes_in_page..].iter_mut().zip(&*page.0) {
+ *res = *src;
+ }
+ }
+ U256::from_big_endian(&result)
+ }
+ }
+
+ pub(crate) fn read_u256_partially(&self, range: Range<u32>) -> U256 {
+ let (page_idx, offset_in_page) = address_to_page_offset(range.start);
+ let length = range.len();
+ let bytes_in_page = length.min(HEAP_PAGE_SIZE - offset_in_page);
+
+ let mut result = [0u8; 32];
+ if let Some(page) = self.page(page_idx) {
+ for (res, src) in result[..bytes_in_page]
+ .iter_mut()
+ .zip(&page.0[offset_in_page..])
+ {
+ *res = *src;
+ }
+ }
+ if let Some(page) = self.page(page_idx + 1) {
+ for (res, src) in result[bytes_in_page..length].iter_mut().zip(&*page.0) {
+ *res = *src;
+ }
+ }
+ U256::from_big_endian(&result)
+ }
+
+ pub(crate) fn read_range_big_endian(&self, range: Range<u32>) -> Vec<u8> {
+ let length = range.len();
+
+ let (mut page_idx, mut offset_in_page) = address_to_page_offset(range.start);
+ let mut result = Vec::with_capacity(length);
+ while result.len() < length {
+ let len_in_page = (length - result.len()).min(HEAP_PAGE_SIZE - offset_in_page);
+ if let Some(page) = self.page(page_idx) {
+ result.extend_from_slice(&page.0[offset_in_page..(offset_in_page + len_in_page)]);
+ } else {
+ result.resize(result.len() + len_in_page, 0);
+ }
+ page_idx += 1;
+ offset_in_page = 0;
+ }
+ result
+ }
+
+    /// Needed only by tracers.
+ pub(crate) fn read_byte(&self, address: u32) -> u8 {
+ let (page, offset) = address_to_page_offset(address);
+ self.page(page).map_or(0, |page| page.0[offset])
+ }
+
+ fn page(&self, idx: usize) -> Option<&HeapPage> {
+ self.pages.get(idx)?.as_ref()
+ }
+
+ fn get_or_insert_page(&mut self, idx: usize, pagepool: &mut PagePool) -> &mut HeapPage {
+ if self.pages.len() <= idx {
+ self.pages.resize(idx + 1, None);
+ }
+ self.pages[idx].get_or_insert_with(|| pagepool.allocate_page())
+ }
+
+ fn write_u256(&mut self, start_address: u32, value: U256, pagepool: &mut PagePool) {
+ let (page_idx, offset_in_page) = address_to_page_offset(start_address);
+ let bytes_in_page = HEAP_PAGE_SIZE - offset_in_page;
+ let page = self.get_or_insert_page(page_idx, pagepool);
+
+ if bytes_in_page >= 32 {
+ value.to_big_endian(&mut page.0[offset_in_page..offset_in_page + 32]);
+ } else {
+ let mut bytes = [0; 32];
+ value.to_big_endian(&mut bytes);
+ let mut bytes_iter = bytes.into_iter();
+
+ for (dst, src) in page.0[offset_in_page..].iter_mut().zip(bytes_iter.by_ref()) {
+ *dst = src;
+ }
+
+ let page = self.get_or_insert_page(page_idx + 1, pagepool);
+ for (dst, src) in page.0.iter_mut().zip(bytes_iter) {
+ *dst = src;
+ }
+ }
+ }
+}
+
+#[inline(always)]
+fn address_to_page_offset(address: u32) -> (usize, usize) {
+ let offset = address as usize;
+ (offset >> 12, offset & (HEAP_PAGE_SIZE - 1))
+}
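+
+// A small worked example of the split above (illustrative, not original code): with
+// 4096-byte pages, address 4097 maps to offset 1 on page 1.
+#[cfg(test)]
+mod page_math_tests {
+    use super::address_to_page_offset;
+
+    #[test]
+    fn page_offset_split() {
+        assert_eq!(address_to_page_offset(0), (0, 0));
+        assert_eq!(address_to_page_offset(4097), (1, 1));
+    }
+}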
+
+#[derive(Debug, Clone)]
+pub(crate) struct Heaps {
+ heaps: Vec<Heap>,
+ pagepool: PagePool,
+ bootloader_heap_rollback_info: Vec<(u32, U256)>,
+ bootloader_aux_rollback_info: Vec<(u32, U256)>,
+}
+
+impl Heaps {
+ pub(crate) fn new(calldata: &[u8]) -> Self {
+        // The first heap can never be used because heap ID 0 denotes
+        // the current heap in precompile calls.
+ let mut pagepool = PagePool::default();
+ Self {
+ heaps: vec![
+ Heap::default(),
+ Heap::from_bytes(calldata, &mut pagepool),
+ Heap::default(),
+ Heap::default(),
+ ],
+ pagepool,
+ bootloader_heap_rollback_info: vec![],
+ bootloader_aux_rollback_info: vec![],
+ }
+ }
+
+ pub(crate) fn allocate(&mut self) -> HeapId {
+ self.allocate_inner(&[])
+ }
+
+ pub(crate) fn allocate_with_content(&mut self, content: &[u8]) -> HeapId {
+ self.allocate_inner(content)
+ }
+
+ fn allocate_inner(&mut self, memory: &[u8]) -> HeapId {
+ let id = u32::try_from(self.heaps.len()).expect("heap ID overflow");
+ let id = HeapId::from_u32_unchecked(id);
+ self.heaps
+ .push(Heap::from_bytes(memory, &mut self.pagepool));
+ id
+ }
+
+ pub(crate) fn deallocate(&mut self, heap: HeapId) {
+ let heap = mem::take(&mut self.heaps[heap.as_u32() as usize]);
+ for page in heap.pages.into_iter().flatten() {
+ self.pagepool.recycle_page(page);
+ }
+ }
+
+ pub(crate) fn write_u256(&mut self, heap: HeapId, start_address: u32, value: U256) {
+ if heap == HeapId::FIRST {
+ let prev_value = self[heap].read_u256(start_address);
+ self.bootloader_heap_rollback_info
+ .push((start_address, prev_value));
+ } else if heap == HeapId::FIRST_AUX {
+ let prev_value = self[heap].read_u256(start_address);
+ self.bootloader_aux_rollback_info
+ .push((start_address, prev_value));
+ }
+ self.heaps[heap.as_u32() as usize].write_u256(start_address, value, &mut self.pagepool);
+ }
+
+ pub(crate) fn snapshot(&self) -> (usize, usize) {
+ (
+ self.bootloader_heap_rollback_info.len(),
+ self.bootloader_aux_rollback_info.len(),
+ )
+ }
+
+ pub(crate) fn rollback(&mut self, (heap_snap, aux_snap): (usize, usize)) {
+ for (address, value) in self.bootloader_heap_rollback_info.drain(heap_snap..).rev() {
+ self.heaps[HeapId::FIRST.as_u32() as usize].write_u256(
+ address,
+ value,
+ &mut self.pagepool,
+ );
+ }
+
+ for (address, value) in self.bootloader_aux_rollback_info.drain(aux_snap..).rev() {
+ self.heaps[HeapId::FIRST_AUX.as_u32() as usize].write_u256(
+ address,
+ value,
+ &mut self.pagepool,
+ );
+ }
+ }
+
+ pub(crate) fn delete_history(&mut self) {
+ self.bootloader_heap_rollback_info.clear();
+ self.bootloader_aux_rollback_info.clear();
+ }
+}
+
+impl Index<HeapId> for Heaps {
+ type Output = Heap;
+
+ fn index(&self, index: HeapId) -> &Self::Output {
+ &self.heaps[index.as_u32() as usize]
+ }
+}
+
+// Since we never remove `Heap` entries (even after rollbacks – although we do deallocate heaps in this case),
+// we allow additional empty heaps at the end of `Heaps`.
+impl PartialEq for Heaps {
+ fn eq(&self, other: &Self) -> bool {
+ for i in 0..self.heaps.len().max(other.heaps.len()) {
+ if self.heaps.get(i).unwrap_or(&Heap::default())
+ != other.heaps.get(i).unwrap_or(&Heap::default())
+ {
+ return false;
+ }
+ }
+ true
+ }
+}
+
+#[derive(Default, Clone)]
+struct PagePool(Vec<HeapPage>);
+
+impl fmt::Debug for PagePool {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter
+ .debug_struct("PagePool")
+ .field("len", &self.0.len())
+ .finish_non_exhaustive()
+ }
+}
+
+impl PagePool {
+ fn allocate_page(&mut self) -> HeapPage {
+ self.get_dirty_page()
+ .map(|mut page| {
+ page.0.fill(0);
+ page
+ })
+ .unwrap_or_default()
+ }
+
+ fn get_dirty_page(&mut self) -> Option<HeapPage> {
+ self.0.pop()
+ }
+
+ fn recycle_page(&mut self, page: HeapPage) {
+ self.0.push(page);
+ }
+}
+
+#[cfg(test)]
+#[allow(clippy::cast_possible_truncation)]
+mod tests {
+ use super::*;
+
+ fn repeat_byte(byte: u8) -> U256 {
+ U256::from_little_endian(&[byte; 32])
+ }
+
+ fn test_heap_write_resizes(recycled_pages: &mut PagePool) {
+ let mut heap = Heap::default();
+ heap.write_u256(5, 1.into(), recycled_pages);
+ assert_eq!(heap.pages.len(), 1);
+ assert_eq!(heap.read_u256(5), 1.into());
+
+ // Check writing at a page boundary
+ heap.write_u256(
+ HEAP_PAGE_SIZE as u32 - 32,
+ repeat_byte(0xaa),
+ recycled_pages,
+ );
+ assert_eq!(heap.pages.len(), 1);
+ assert_eq!(
+ heap.read_u256(HEAP_PAGE_SIZE as u32 - 32),
+ repeat_byte(0xaa)
+ );
+
+ for offset in (1..=31).rev() {
+ heap.write_u256(
+ HEAP_PAGE_SIZE as u32 - offset,
+ repeat_byte(offset as u8),
+ recycled_pages,
+ );
+ assert_eq!(heap.pages.len(), 2);
+ assert_eq!(
+ heap.read_u256(HEAP_PAGE_SIZE as u32 - offset),
+ repeat_byte(offset as u8)
+ );
+ }
+
+ // check reading at a page boundary from a missing page
+ for offset in 0..32 {
+ assert_eq!(heap.read_u256((1 << 20) - offset), 0.into());
+ }
+
+ heap.write_u256(1 << 20, repeat_byte(0xff), recycled_pages);
+ assert_eq!(heap.pages.len(), 257);
+ assert_eq!(heap.pages.iter().flatten().count(), 3);
+ assert_eq!(heap.read_u256(1 << 20), repeat_byte(0xff));
+ }
+
+ #[test]
+ fn heap_write_resizes() {
+ test_heap_write_resizes(&mut PagePool::default());
+ }
+
+ #[test]
+ fn heap_write_resizes_with_recycled_pages() {
+ test_heap_write_resizes(&mut populated_pagepool());
+ }
+
+ fn populated_pagepool() -> PagePool {
+ let mut pagepool = PagePool::default();
+ for _ in 0..10 {
+ let mut page = HeapPage::default();
+            // Fill pages with 0xff bytes to detect pages that aren't cleared before reuse.
+ page.0.fill(0xff);
+ pagepool.recycle_page(page);
+ }
+ pagepool
+ }
+
+ #[test]
+ fn reading_heap_range() {
+ let mut heap = Heap::default();
+ let offsets = [
+ 0_u32,
+ 10,
+ HEAP_PAGE_SIZE as u32 - 10,
+ HEAP_PAGE_SIZE as u32 + 10,
+ (1 << 20) - 10,
+ 1 << 20,
+ (1 << 20) + 10,
+ ];
+ for offset in offsets {
+ for length in [0, 1, 10, 31, 32, 1_024, 32_768] {
+ let data = heap.read_range_big_endian(offset..offset + length);
+ assert_eq!(data.len(), length as usize);
+ assert!(data.iter().all(|&byte| byte == 0));
+ }
+ }
+
+ for (i, offset) in offsets.into_iter().enumerate() {
+ let bytes: Vec<_> = (i..i + 32).map(|byte| byte as u8).collect();
+ heap.write_u256(
+ offset,
+ U256::from_big_endian(&bytes),
+ &mut PagePool::default(),
+ );
+ for length in 1..=32 {
+ let data = heap.read_range_big_endian(offset..offset + length);
+ assert_eq!(data, bytes[..length as usize]);
+ }
+ }
+ }
+
+ #[test]
+ fn heap_partial_u256_reads() {
+ let mut heap = Heap::default();
+ let bytes: Vec<_> = (1..=32).collect();
+ heap.write_u256(0, U256::from_big_endian(&bytes), &mut PagePool::default());
+ for length in 1..=32 {
+ let read = heap.read_u256_partially(0..length);
+ // Mask is 0xff...ff00..00, where the number of `0xff` bytes is the number of read bytes
+ let mask = U256::MAX << (8 * (32 - length));
+ assert_eq!(read, U256::from_big_endian(&bytes) & mask);
+ }
+
+ // The same test at the page boundary.
+ let offset = HEAP_PAGE_SIZE as u32 - 10;
+ heap.write_u256(
+ offset,
+ U256::from_big_endian(&bytes),
+ &mut PagePool::default(),
+ );
+ for length in 1..=32 {
+ let read = heap.read_u256_partially(offset..offset + length);
+ let mask = U256::MAX << (8 * (32 - length));
+ assert_eq!(read, U256::from_big_endian(&bytes) & mask);
+ }
+ }
+
+ #[test]
+ fn heap_read_out_of_bounds() {
+ let heap = Heap::default();
+ assert_eq!(heap.read_u256(5), 0.into());
+ }
+
+ fn test_creating_heap_from_bytes(recycled_pages: &mut PagePool) {
+ let bytes: Vec<_> = (0..=u8::MAX).collect();
+ let heap = Heap::from_bytes(&bytes, recycled_pages);
+ assert_eq!(heap.pages.len(), 1);
+
+ assert_eq!(heap.read_range_big_endian(0..256), bytes);
+ for offset in 0..256 - 32 {
+ let value = heap.read_u256(offset as u32);
+ assert_eq!(value, U256::from_big_endian(&bytes[offset..offset + 32]));
+ }
+
+ // Test larger heap with multiple pages.
+ let bytes: Vec<_> = (0..HEAP_PAGE_SIZE * 5 / 2).map(|byte| byte as u8).collect();
+ let heap = Heap::from_bytes(&bytes, recycled_pages);
+ assert_eq!(heap.pages.len(), 3);
+
+ assert_eq!(
+ heap.read_range_big_endian(0..HEAP_PAGE_SIZE as u32 * 5 / 2),
+ bytes
+ );
+ for len in [
+ 1,
+ 10,
+ 100,
+ HEAP_PAGE_SIZE / 3,
+ HEAP_PAGE_SIZE / 2,
+ HEAP_PAGE_SIZE,
+ 2 * HEAP_PAGE_SIZE,
+ ] {
+ for offset in 0..(HEAP_PAGE_SIZE * 5 / 2 - len) {
+ assert_eq!(
+ heap.read_range_big_endian(offset as u32..(offset + len) as u32),
+ bytes[offset..offset + len]
+ );
+ }
+ }
+
+ for offset in 0..HEAP_PAGE_SIZE * 5 / 2 - 32 {
+ let value = heap.read_u256(offset as u32);
+ assert_eq!(value, U256::from_big_endian(&bytes[offset..offset + 32]));
+ }
+ }
+
+ #[test]
+ fn creating_heap_from_bytes() {
+ test_creating_heap_from_bytes(&mut PagePool::default());
+ }
+
+ #[test]
+ fn creating_heap_from_bytes_with_recycling() {
+ test_creating_heap_from_bytes(&mut populated_pagepool());
+ }
+
+ #[test]
+ fn rolling_back_heaps() {
+ let mut heaps = Heaps::new(b"test");
+ let written_value = U256::from(123_456_789) << 224; // writes bytes 0..4
+ heaps.write_u256(HeapId::FIRST, 0, written_value);
+ assert_eq!(heaps[HeapId::FIRST].read_u256(0), written_value);
+ heaps.write_u256(HeapId::FIRST_AUX, 0, 42.into());
+ assert_eq!(heaps[HeapId::FIRST_AUX].read_u256(0), 42.into());
+
+ let snapshot = heaps.snapshot();
+ assert_eq!(snapshot, (1, 1));
+
+ heaps.write_u256(HeapId::FIRST, 7, U256::MAX);
+ assert_eq!(
+ heaps[HeapId::FIRST].read_u256(0),
+ written_value + (U256::MAX >> 56)
+ );
+ heaps.write_u256(HeapId::FIRST_AUX, 16, U256::MAX);
+ assert_eq!(heaps[HeapId::FIRST_AUX].read_u256(16), U256::MAX);
+
+ heaps.rollback(snapshot);
+ assert_eq!(heaps[HeapId::FIRST].read_u256(0), written_value);
+ assert_eq!(heaps[HeapId::FIRST_AUX].read_u256(0), 42.into());
+ assert_eq!(heaps.bootloader_heap_rollback_info.len(), 1);
+ assert_eq!(heaps.bootloader_aux_rollback_info.len(), 1);
+ }
+}
+
+use std::fmt;
+
+use crate::{addressing_modes::Arguments, vm::VirtualMachine};
+
+/// Single EraVM instruction (an opcode + [`Arguments`]).
+///
+/// Managing instructions manually is only warranted for low-level tests; prefer using
+/// [`Program`](crate::Program)s to decode instructions from EraVM bytecodes.
+pub struct Instruction<T, W> {
+ pub(crate) handler: Handler<T, W>,
+ pub(crate) arguments: Arguments,
+}
+
+impl<T, W> fmt::Debug for Instruction<T, W> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter
+ .debug_struct("Instruction")
+ .field("arguments", &self.arguments)
+ .finish_non_exhaustive()
+ }
+}
+
+pub(crate) type Handler<T, W> = fn(&mut VirtualMachine<T, W>, &mut W, &mut T) -> ExecutionStatus;
+
+#[derive(Debug)]
+pub(crate) enum ExecutionStatus {
+ Running,
+ Stopped(ExecutionEnd),
+}
+
+/// VM stop reason returned from [`VirtualMachine::run()`].
+#[derive(Debug, PartialEq)]
+pub enum ExecutionEnd {
+ /// The executed program has finished and returned the specified data.
+ ProgramFinished(Vec<u8>),
+ /// The executed program has reverted returning the specified data.
+ Reverted(Vec<u8>),
+ /// The executed program has panicked.
+ Panicked,
+    /// Returned when the bootloader writes to the heap location specified by [`hook_address`](crate::Settings::hook_address).
+ SuspendedOnHook(u32),
+}
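+
+// A hypothetical usage sketch (the exact `run()` signature is assumed here, not taken
+// from this file): callers typically match on the returned `ExecutionEnd`.
+//
+//     match vm.run(&mut world, &mut tracer) {
+//         ExecutionEnd::ProgramFinished(output) => { /* use the return data */ }
+//         ExecutionEnd::Reverted(data) => { /* propagate the revert */ }
+//         ExecutionEnd::Panicked => { /* handle the irrecoverable failure */ }
+//         ExecutionEnd::SuspendedOnHook(hook) => { /* dispatch the bootloader hook */ }
+//     }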
+
+use primitive_types::U256;
+use zksync_vm2_interface::{
+ opcodes::{Add, And, Div, Mul, Or, RotateLeft, RotateRight, ShiftLeft, ShiftRight, Sub, Xor},
+ OpcodeType, Tracer,
+};
+
+use super::{
+ common::boilerplate,
+ monomorphization::{
+ match_boolean, match_destination, match_source, monomorphize, parameterize,
+ },
+};
+use crate::{
+ addressing_modes::{
+ AbsoluteStack, Addressable, AdvanceStackPointer, AnyDestination, AnySource, Arguments,
+ CodePage, Destination, DestinationWriter, Immediate1, Register1, Register2, RelativeStack,
+ Source,
+ },
+ instruction::{ExecutionStatus, Instruction},
+ predication::Flags,
+ VirtualMachine, World,
+};
+
+fn binop<T, W, Op, In1, Out, const SWAP: bool, const SET_FLAGS: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus
+where
+ T: Tracer,
+ Op: Binop,
+ In1: Source,
+ Out: Destination,
+{
+ boilerplate::<Op, _, _>(vm, world, tracer, |vm, args| {
+ let a = In1::get(args, &mut vm.state);
+ let b = Register2::get(args, &mut vm.state);
+ let (a, b) = if SWAP { (b, a) } else { (a, b) };
+
+ let (result, out2, flags) = Op::perform(&a, &b);
+ Out::set(args, &mut vm.state, result);
+ out2.write(args, &mut vm.state);
+ if SET_FLAGS {
+ vm.state.flags = flags;
+ }
+ })
+}
+
+pub(crate) trait Binop: OpcodeType {
+ type Out2: SecondOutput;
+ fn perform(a: &U256, b: &U256) -> (U256, Self::Out2, Flags);
+}
+
+impl Binop for Add {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let (result, overflow) = a.overflowing_add(*b);
+ (
+ result,
+ (),
+ Flags::new(overflow, result.is_zero(), !(overflow || result.is_zero())),
+ )
+ }
+ type Out2 = ();
+}
+
+impl Binop for Sub {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let (result, overflow) = a.overflowing_sub(*b);
+ (
+ result,
+ (),
+ Flags::new(overflow, result.is_zero(), !(overflow || result.is_zero())),
+ )
+ }
+ type Out2 = ();
+}
+
+impl Binop for And {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let result = *a & *b;
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+impl Binop for Or {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let result = *a | *b;
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+impl Binop for Xor {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let result = *a ^ *b;
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+impl Binop for ShiftLeft {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let result = *a << (b.low_u32() % 256);
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+impl Binop for ShiftRight {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let result = *a >> (b.low_u32() % 256);
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+impl Binop for RotateLeft {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let shift = b.low_u32() % 256;
+ let result = *a << shift | *a >> (256 - shift);
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+impl Binop for RotateRight {
+ #[inline(always)]
+ fn perform(a: &U256, b: &U256) -> (U256, (), Flags) {
+ let shift = b.low_u32() % 256;
+ let result = *a >> shift | *a << (256 - shift);
+ (result, (), Flags::new(false, result.is_zero(), false))
+ }
+ type Out2 = ();
+}
+
+/// Second output of a binary operation.
+pub(crate) trait SecondOutput {
+ type Destination: DestinationWriter;
+ fn write(self, args: &Arguments, state: &mut impl Addressable);
+}
+
+impl SecondOutput for () {
+ type Destination = ();
+ fn write(self, _: &Arguments, _: &mut impl Addressable) {}
+}
+
+impl DestinationWriter for () {
+ fn write_destination(&self, _: &mut Arguments) {}
+}
+
+impl SecondOutput for U256 {
+ type Destination = Register2;
+ fn write(self, args: &Arguments, state: &mut impl Addressable) {
+ Self::Destination::set(args, state, self);
+ }
+}
+
+impl Binop for Mul {
+ fn perform(a: &U256, b: &U256) -> (U256, Self::Out2, Flags) {
+ let res = a.full_mul(*b);
+ let (low_slice, high_slice) = res.0.split_at(4);
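+        // `full_mul` yields a U512; the low four u64 words are the wrapping product and
+        // the high four words are the overflow part, which becomes the second output.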
+
+ let mut low_arr = [0; 4];
+ low_arr.copy_from_slice(low_slice);
+ let low = U256(low_arr);
+
+ let mut high_arr = [0; 4];
+ high_arr.copy_from_slice(high_slice);
+ let high = U256(high_arr);
+
+ (
+ low,
+ high,
+ Flags::new(
+ !high.is_zero(),
+ low.is_zero(),
+ high.is_zero() && !low.is_zero(),
+ ),
+ )
+ }
+ type Out2 = U256;
+}
+
+impl Binop for Div {
+ fn perform(a: &U256, b: &U256) -> (U256, Self::Out2, Flags) {
+ if b.is_zero() {
+ (U256::zero(), U256::zero(), Flags::new(true, false, false))
+ } else {
+ let (quotient, remainder) = a.div_mod(*b);
+ (
+ quotient,
+ remainder,
+ Flags::new(false, quotient.is_zero(), remainder.is_zero()),
+ )
+ }
+ }
+ type Out2 = U256;
+}
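+
+// A worked example of the `Div` outputs above (illustrative, not original code): the
+// quotient goes to the primary destination, the remainder to `Register2`, and division
+// by zero yields zeros with the overflow ("less than") flag set.
+#[cfg(test)]
+mod div_tests {
+    use primitive_types::U256;
+    use zksync_vm2_interface::opcodes::Div;
+
+    use super::Binop;
+
+    #[test]
+    fn div_returns_quotient_and_remainder() {
+        let (quotient, remainder, _flags) =
+            <Div as Binop>::perform(&U256::from(7_u64), &U256::from(2_u64));
+        assert_eq!(quotient, U256::from(3_u64));
+        assert_eq!(remainder, U256::from(1_u64));
+
+        let (quotient, remainder, _flags) =
+            <Div as Binop>::perform(&U256::from(7_u64), &U256::zero());
+        assert_eq!(quotient, U256::zero());
+        assert_eq!(remainder, U256::zero());
+    }
+}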
+
+macro_rules! from_binop {
+ ($name:ident <$binop:ty>) => {
+ #[doc = concat!("Creates [`", stringify!($binop), "`] instruction with the provided params.")]
+ pub fn $name(
+ src1: AnySource,
+ src2: Register2,
+ out: AnyDestination,
+ arguments: Arguments,
+ swap: bool,
+ set_flags: bool,
+ ) -> Self {
+ Self::from_binop::<$binop>(src1, src2, out, &(), arguments, swap, set_flags)
+ }
+ };
+
+ ($name:ident <$binop:ty, $out2: ty>) => {
+ #[doc = concat!("Creates [`", stringify!($binop), "`] instruction with the provided params.")]
+ pub fn $name(
+ src1: AnySource,
+ src2: Register2,
+ out: AnyDestination,
+ out2: $out2,
+ arguments: Arguments,
+ swap: bool,
+ set_flags: bool,
+ ) -> Self {
+ Self::from_binop::<$binop>(src1, src2, out, &out2, arguments, swap, set_flags)
+ }
+ };
+}
+
+/// Instructions for binary operations.
+impl<T: Tracer, W: World<T>> Instruction<T, W> {
+ pub(crate) fn from_binop<Op: Binop>(
+ src1: AnySource,
+ src2: Register2,
+ out: AnyDestination,
+ out2: &<Op::Out2 as SecondOutput>::Destination,
+ arguments: Arguments,
+ swap: bool,
+ set_flags: bool,
+ ) -> Self {
+ Self {
+ handler: monomorphize!(binop [T W Op] match_source src1 match_destination out match_boolean swap match_boolean set_flags),
+ arguments: arguments
+ .write_source(&src1)
+ .write_source(&src2)
+ .write_destination(&out)
+ .write_destination(out2),
+ }
+ }
+
+ from_binop!(from_add<Add>);
+ from_binop!(from_sub<Sub>);
+ from_binop!(from_and<And>);
+ from_binop!(from_or<Or>);
+ from_binop!(from_xor<Xor>);
+ from_binop!(from_shift_left<ShiftLeft>);
+ from_binop!(from_shift_right<ShiftRight>);
+ from_binop!(from_rotate_left<RotateLeft>);
+ from_binop!(from_rotate_right<RotateRight>);
+
+ from_binop!(from_mul <Mul, Register2>);
+ from_binop!(from_div <Div, Register2>);
+}
+
+use zksync_vm2_interface::{opcodes, OpcodeType, Tracer};
+
+use super::ret::free_panic;
+use crate::{addressing_modes::Arguments, instruction::ExecutionStatus, VirtualMachine};
+
+#[inline(always)]
+pub(crate) fn boilerplate<Opcode: OpcodeType, T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+ business_logic: impl FnOnce(&mut VirtualMachine<T, W>, &Arguments),
+) -> ExecutionStatus {
+ full_boilerplate::<Opcode, T, W>(vm, world, tracer, |vm, args, _, _| {
+ business_logic(vm, args);
+ ExecutionStatus::Running
+ })
+}
+
+#[inline(always)]
+pub(crate) fn boilerplate_ext<Opcode: OpcodeType, T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+ business_logic: impl FnOnce(&mut VirtualMachine<T, W>, &Arguments, &mut W, &mut T),
+) -> ExecutionStatus {
+ full_boilerplate::<Opcode, T, W>(vm, world, tracer, |vm, args, world, tracer| {
+ business_logic(vm, args, world, tracer);
+ ExecutionStatus::Running
+ })
+}
+
+#[inline(always)]
+pub(crate) fn full_boilerplate<Opcode: OpcodeType, T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+ business_logic: impl FnOnce(
+ &mut VirtualMachine<T, W>,
+ &Arguments,
+ &mut W,
+ &mut T,
+ ) -> ExecutionStatus,
+) -> ExecutionStatus {
+ let args = unsafe { &(*vm.state.current_frame.pc).arguments };
+
+ if vm.state.use_gas(args.get_static_gas_cost()).is_err()
+ || !args.mode_requirements().met(
+ vm.state.current_frame.is_kernel,
+ vm.state.current_frame.is_static,
+ )
+ {
+ return free_panic(vm, tracer);
+ }
+
+ if args.predicate().satisfied(&vm.state.flags) {
+ tracer.before_instruction::<Opcode, _>(vm);
+ vm.state.current_frame.pc = unsafe { vm.state.current_frame.pc.add(1) };
+ let result = business_logic(vm, args, world, tracer);
+ tracer.after_instruction::<Opcode, _>(vm);
+ result
+ } else {
+ tracer.before_instruction::<opcodes::Nop, _>(vm);
+ vm.state.current_frame.pc = unsafe { vm.state.current_frame.pc.add(1) };
+ tracer.after_instruction::<opcodes::Nop, _>(vm);
+ ExecutionStatus::Running
+ }
+}
+
+use primitive_types::{H160, U256};
+use zkevm_opcode_defs::VmMetaParameters;
+use zksync_vm2_interface::{
+ opcodes::{self, Caller, CodeAddress, ContextU128, ErgsLeft, This, SP},
+ OpcodeType, Tracer,
+};
+
+use super::common::boilerplate;
+use crate::{
+ addressing_modes::{Arguments, Destination, Register1, Source},
+ instruction::ExecutionStatus,
+ state::State,
+ Instruction, VirtualMachine,
+};
+
+pub(crate) fn address_into_u256(address: H160) -> U256 {
+ let mut buffer = [0; 32];
+ buffer[12..].copy_from_slice(address.as_bytes());
+ U256::from_big_endian(&buffer)
+}
+
+fn context<T, W, Op>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus
+where
+ T: Tracer,
+ Op: ContextOp,
+{
+ boilerplate::<Op, _, _>(vm, world, tracer, |vm, args| {
+ let result = Op::get(&vm.state);
+ Register1::set(args, &mut vm.state, result);
+ })
+}
+
+trait ContextOp: OpcodeType {
+ fn get<T, W>(state: &State<T, W>) -> U256;
+}
+
+impl ContextOp for This {
+ fn get<T, W>(state: &State<T, W>) -> U256 {
+ address_into_u256(state.current_frame.address)
+ }
+}
+
+impl ContextOp for Caller {
+ fn get<T, W>(state: &State<T, W>) -> U256 {
+ address_into_u256(state.current_frame.caller)
+ }
+}
+
+impl ContextOp for CodeAddress {
+ fn get<T, W>(state: &State<T, W>) -> U256 {
+ address_into_u256(state.current_frame.code_address)
+ }
+}
+
+impl ContextOp for ErgsLeft {
+ fn get<T, W>(state: &State<T, W>) -> U256 {
+ U256([state.current_frame.gas.into(), 0, 0, 0])
+ }
+}
+
+impl ContextOp for ContextU128 {
+ fn get<T, W>(state: &State<T, W>) -> U256 {
+ state.get_context_u128().into()
+ }
+}
+
+impl ContextOp for SP {
+ fn get<T, W>(state: &State<T, W>) -> U256 {
+ state.current_frame.sp.into()
+ }
+}
+
+fn context_meta<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::ContextMeta, _, _>(vm, world, tracer, |vm, args| {
+ let result = VmMetaParameters {
+ heap_size: vm.state.current_frame.heap_size,
+ aux_heap_size: vm.state.current_frame.aux_heap_size,
+ this_shard_id: 0, // TODO properly implement shards
+ caller_shard_id: 0,
+ code_shard_id: 0,
+ // This field is actually pubdata!
+ aux_field_0: if vm.state.current_frame.is_kernel {
+ #[allow(clippy::cast_sign_loss)] // wrapping conversion is intentional
+ {
+ vm.world_diff.pubdata.0 as u32
+ }
+ } else {
+ 0
+ },
+ }
+ .to_u256();
+
+ Register1::set(args, &mut vm.state, result);
+ })
+}
+
+fn set_context_u128<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::SetContextU128, _, _>(vm, world, tracer, |vm, args| {
+ let value = Register1::get(args, &mut vm.state).low_u128();
+ vm.state.set_context_u128(value);
+ })
+}
+
+fn increment_tx_number<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::IncrementTxNumber, _, _>(vm, world, tracer, |vm, _| {
+ vm.start_new_tx();
+ })
+}
+
+fn aux_mutating<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::AuxMutating0, _, _>(vm, world, tracer, |_, _| {
+ // This instruction just crashes or nops
+ })
+}
+
+/// Context-related instructions.
+impl<T: Tracer, W> Instruction<T, W> {
+ fn from_context<Op: ContextOp>(out: Register1, arguments: Arguments) -> Self {
+ Self {
+ handler: context::<T, W, Op>,
+ arguments: arguments.write_destination(&out),
+ }
+ }
+
+ /// Creates a [`This`] instruction with the provided params.
+ pub fn from_this(out: Register1, arguments: Arguments) -> Self {
+ Self::from_context::<This>(out, arguments)
+ }
+
+ /// Creates a [`Caller`] instruction with the provided params.
+ pub fn from_caller(out: Register1, arguments: Arguments) -> Self {
+ Self::from_context::<Caller>(out, arguments)
+ }
+
+ /// Creates a [`CodeAddress`] instruction with the provided params.
+ pub fn from_code_address(out: Register1, arguments: Arguments) -> Self {
+ Self::from_context::<CodeAddress>(out, arguments)
+ }
+
+ /// Creates an [`ErgsLeft`] instruction with the provided params.
+ pub fn from_ergs_left(out: Register1, arguments: Arguments) -> Self {
+ Self::from_context::<ErgsLeft>(out, arguments)
+ }
+
+ /// Creates a [`ContextU128`] instruction with the provided params.
+ pub fn from_context_u128(out: Register1, arguments: Arguments) -> Self {
+ Self::from_context::<ContextU128>(out, arguments)
+ }
+
+ /// Creates an [`SP`] instruction with the provided params.
+ pub fn from_context_sp(out: Register1, arguments: Arguments) -> Self {
+ Self::from_context::<SP>(out, arguments)
+ }
+
+ /// Creates a [`ContextMeta`](opcodes::ContextMeta) instruction with the provided params.
+ pub fn from_context_meta(out: Register1, arguments: Arguments) -> Self {
+ Self {
+ handler: context_meta,
+ arguments: arguments.write_destination(&out),
+ }
+ }
+
+ /// Creates a [`SetContextU128`](opcodes::SetContextU128) instruction with the provided params.
+ pub fn from_set_context_u128(src: Register1, arguments: Arguments) -> Self {
+ Self {
+ handler: set_context_u128,
+ arguments: arguments.write_source(&src),
+ }
+ }
+
+ /// Creates an [`IncrementTxNumber`](opcodes::IncrementTxNumber) instruction with the provided params.
+ pub fn from_increment_tx_number(arguments: Arguments) -> Self {
+ Self {
+ handler: increment_tx_number,
+ arguments,
+ }
+ }
+
+ /// Creates an [`AuxMutating0`](opcodes::AuxMutating0) instruction with the provided params.
+ pub fn from_aux_mutating(arguments: Arguments) -> Self {
+ Self {
+ handler: aux_mutating,
+ arguments,
+ }
+ }
+}
+
+use primitive_types::U256;
+use zkevm_opcode_defs::{BlobSha256Format, ContractCodeSha256Format, VersionedHashLen32};
+use zksync_vm2_interface::{opcodes, Tracer};
+
+use super::common::boilerplate_ext;
+use crate::{
+ addressing_modes::{Arguments, Destination, Register1, Register2, Source},
+ fat_pointer::FatPointer,
+ instruction::ExecutionStatus,
+ Instruction, VirtualMachine, World,
+};
+
+fn decommit<T: Tracer, W: World<T>>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate_ext::<opcodes::Decommit, _, _>(vm, world, tracer, |vm, args, world, tracer| {
+ let code_hash = Register1::get(args, &mut vm.state);
+ let extra_cost = Register2::get(args, &mut vm.state).low_u32();
+
+ let mut buffer = [0u8; 32];
+ code_hash.to_big_endian(&mut buffer);
+
+ let preimage_len_in_bytes =
+ zkevm_opcode_defs::system_params::NEW_KERNEL_FRAME_MEMORY_STIPEND;
+
+ if vm.state.use_gas(extra_cost).is_err()
+ || (!ContractCodeSha256Format::is_valid(&buffer)
+ && !BlobSha256Format::is_valid(&buffer))
+ {
+ Register1::set(args, &mut vm.state, U256::zero());
+ return;
+ }
+
+ let (program, is_fresh) = vm.world_diff.decommit_opcode(world, tracer, code_hash);
+ if !is_fresh {
+ vm.state.current_frame.gas += extra_cost;
+ }
+
+ let heap = vm.state.heaps.allocate_with_content(program.as_ref());
+ vm.state.current_frame.heaps_i_am_keeping_alive.push(heap);
+
+ let value = FatPointer {
+ offset: 0,
+ memory_page: heap,
+ start: 0,
+ length: preimage_len_in_bytes,
+ };
+ let value = value.into_u256();
+ Register1::set_fat_ptr(args, &mut vm.state, value);
+ })
+}
+
+impl<T: Tracer, W: World<T>> Instruction<T, W> {
+ /// Creates a [`Decommit`](opcodes::Decommit) instruction with the provided params.
+ pub fn from_decommit(
+ abi: Register1,
+ burn: Register2,
+ out: Register1,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ arguments: arguments
+ .write_source(&abi)
+ .write_source(&burn)
+ .write_destination(&out),
+ handler: decommit,
+ }
+ }
+}
+
+use primitive_types::H160;
+use zkevm_opcode_defs::ADDRESS_EVENT_WRITER;
+use zksync_vm2_interface::{opcodes, Event, L2ToL1Log, Tracer};
+
+use super::common::boilerplate_ext;
+use crate::{
+ addressing_modes::{Arguments, Immediate1, Register1, Register2, Source},
+ instruction::ExecutionStatus,
+ Instruction, VirtualMachine,
+};
+
+fn event<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate_ext::<opcodes::Event, _, _>(vm, world, tracer, |vm, args, _, _| {
+ if vm.state.current_frame.address == H160::from_low_u64_be(ADDRESS_EVENT_WRITER.into()) {
+ let key = Register1::get(args, &mut vm.state);
+ let value = Register2::get(args, &mut vm.state);
+ let is_first = Immediate1::get(args, &mut vm.state).low_u32() == 1;
+
+ vm.world_diff.record_event(Event {
+ key,
+ value,
+ is_first,
+ shard_id: 0, // shards currently aren't supported
+ tx_number: vm.state.transaction_number,
+ });
+ }
+ })
+}
+
+fn l2_to_l1<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate_ext::<opcodes::L2ToL1Message, _, _>(vm, world, tracer, |vm, args, _, _| {
+ let key = Register1::get(args, &mut vm.state);
+ let value = Register2::get(args, &mut vm.state);
+ let is_service = Immediate1::get(args, &mut vm.state).low_u32() == 1;
+ vm.world_diff.record_l2_to_l1_log(L2ToL1Log {
+ key,
+ value,
+ is_service,
+ address: vm.state.current_frame.address,
+ shard_id: 0,
+ tx_number: vm.state.transaction_number,
+ });
+ })
+}
+
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates an [`Event`](opcodes::Event) instruction with the provided params.
+ pub fn from_event(
+ key: Register1,
+ value: Register2,
+ is_first: bool,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: event,
+ arguments: arguments
+ .write_source(&key)
+ .write_source(&value)
+ .write_source(&Immediate1(is_first.into())),
+ }
+ }
+
+ /// Creates an [`L2ToL1Message`](opcodes::L2ToL1Message) instruction with the provided params.
+ pub fn from_l2_to_l1_message(
+ key: Register1,
+ value: Register2,
+ is_service: bool,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: l2_to_l1,
+ arguments: arguments
+ .write_source(&key)
+ .write_source(&value)
+ .write_source(&Immediate1(is_service.into())),
+ }
+ }
+}
+
+use primitive_types::U256;
+use zkevm_opcode_defs::{
+ system_params::{EVM_SIMULATOR_STIPEND, MSG_VALUE_SIMULATOR_ADDITIVE_COST},
+ ADDRESS_MSG_VALUE,
+};
+use zksync_vm2_interface::{
+ opcodes::{FarCall, TypeLevelCallingMode},
+ Tracer,
+};
+
+use super::{
+ common::boilerplate_ext,
+ heap_access::grow_heap,
+ monomorphization::{match_boolean, monomorphize, parameterize},
+ ret::{panic_from_failed_far_call, RETURN_COST},
+ AuxHeap, Heap,
+};
+use crate::{
+ addressing_modes::{Arguments, Immediate1, Register1, Register2, Source},
+ decommit::{is_kernel, u256_into_address},
+ fat_pointer::FatPointer,
+ instruction::ExecutionStatus,
+ predication::Flags,
+ Instruction, VirtualMachine, World,
+};
+
+/// A call to another contract.
+///
+/// First, the code of the called contract is fetched, and a fat pointer is created
+/// or an existing one is forwarded. Costs for decommitting and memory growth are paid
+/// at this point.
+///
+/// A new stack frame is pushed. At most 63/64 of the *remaining* gas is passed to the called contract.
+///
+/// Even though all errors happen before the new stack frame is pushed, they cause a panic
+/// in the new frame, not in the caller!
+fn far_call<T, W, M, const IS_STATIC: bool, const IS_SHARD: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus
+where
+ T: Tracer,
+ W: World<T>,
+ M: TypeLevelCallingMode,
+{
+ boilerplate_ext::<FarCall<M>, _, _>(vm, world, tracer, |vm, args, world, tracer| {
+ let (raw_abi, raw_abi_is_pointer) = Register1::get_with_pointer_flag(args, &mut vm.state);
+
+ let address_mask: U256 = U256::MAX >> (256 - 160);
+ let destination_address = Register2::get(args, &mut vm.state) & address_mask;
+ let exception_handler = Immediate1::get_u16(args);
+
+ let mut abi = get_far_call_arguments(raw_abi);
+ abi.is_constructor_call = abi.is_constructor_call && vm.state.current_frame.is_kernel;
+ abi.is_system_call =
+ abi.is_system_call && is_kernel(u256_into_address(destination_address));
+
+ let mut mandated_gas =
+ if abi.is_system_call && destination_address == ADDRESS_MSG_VALUE.into() {
+ MSG_VALUE_SIMULATOR_ADDITIVE_COST
+ } else {
+ 0
+ };
+
+ let failing_part = (|| {
+ let decommit_result = vm.world_diff.decommit(
+ world,
+ tracer,
+ destination_address,
+ vm.settings.default_aa_code_hash,
+ vm.settings.evm_interpreter_code_hash,
+ abi.is_constructor_call,
+ );
+
+ // Calldata has to be constructed even if we already know we will panic, because
+ // overflowing start + length makes the heap resize even when already panicking.
+ let already_failed = decommit_result.is_none() || IS_SHARD && abi.shard_id != 0;
+
+ let maybe_calldata =
+ get_far_call_calldata(raw_abi, raw_abi_is_pointer, vm, already_failed);
+
+ // mandated gas is passed even if it means transferring more than the 63/64 rule allows
+ if let Some(gas_left) = vm.state.current_frame.gas.checked_sub(mandated_gas) {
+ vm.state.current_frame.gas = gas_left;
+ } else {
+ // If the gas is insufficient, the rest is burned
+ vm.state.current_frame.gas = 0;
+ mandated_gas = 0;
+ return None;
+ };
+
+ let calldata = maybe_calldata?;
+ let (unpaid_decommit, is_evm) = decommit_result?;
+ let program = vm.world_diff.pay_for_decommit(
+ world,
+ tracer,
+ unpaid_decommit,
+ &mut vm.state.current_frame.gas,
+ )?;
+
+ Some((calldata, program, is_evm))
+ })();
+
+ let maximum_gas = vm.state.current_frame.gas / 64 * 63;
+ let normally_passed_gas = abi.gas_to_pass.min(maximum_gas);
+ vm.state.current_frame.gas -= normally_passed_gas;
+ let new_frame_gas = normally_passed_gas + mandated_gas;
+
+ let Some((calldata, program, is_evm_interpreter)) = failing_part else {
+ vm.state.current_frame.gas += new_frame_gas.saturating_sub(RETURN_COST);
+ panic_from_failed_far_call(vm, tracer, exception_handler);
+ return;
+ };
+
+ let stipend = if is_evm_interpreter {
+ EVM_SIMULATOR_STIPEND
+ } else {
+ 0
+ };
+ let new_frame_gas = new_frame_gas
+ .checked_add(stipend)
+ .expect("stipend must not cause overflow");
+
+ let new_frame_is_static = IS_STATIC || vm.state.current_frame.is_static;
+ vm.push_frame::<M>(
+ u256_into_address(destination_address),
+ program,
+ new_frame_gas,
+ stipend,
+ exception_handler,
+ new_frame_is_static && !is_evm_interpreter,
+ calldata.memory_page,
+ vm.world_diff.snapshot(),
+ );
+
+ vm.state.flags = Flags::new(false, false, false);
+
+ if abi.is_system_call {
+ // r3 to r12 are kept but they lose their pointer flags
+ vm.state.registers[13] = U256::zero();
+ vm.state.registers[14] = U256::zero();
+ vm.state.registers[15] = U256::zero();
+ } else {
+ vm.state.registers = [U256::zero(); 16];
+ }
+
+ // Only r1 is a pointer
+ vm.state.register_pointer_flags = 2;
+ vm.state.registers[1] = calldata.into_u256();
+
+ let is_static_call_to_evm_interpreter = new_frame_is_static && is_evm_interpreter;
+ let call_type = (u8::from(is_static_call_to_evm_interpreter) << 2)
+ | (u8::from(abi.is_system_call) << 1)
+ | u8::from(abi.is_constructor_call);
+
+ vm.state.registers[2] = call_type.into();
+ })
+}
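+
+// A minimal sketch (hypothetical helper, not part of the crate) of the gas
+// cap applied above. Integer division happens first, so the callee receives
+// at most 63/64 of the caller's remaining gas, however much was requested.
+#[cfg(test)]
+fn gas_passed_on_far_call(remaining: u32, requested: u32) -> u32 {
+ let maximum_gas = remaining / 64 * 63;
+ requested.min(maximum_gas)
+}
+// E.g. with 6_400 gas remaining, even a request of `u32::MAX` yields 6_300.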
+
+#[derive(Debug)]
+pub(crate) struct FarCallABI {
+ pub(crate) gas_to_pass: u32,
+ pub(crate) shard_id: u8,
+ pub(crate) is_constructor_call: bool,
+ pub(crate) is_system_call: bool,
+}
+
+#[allow(clippy::cast_possible_truncation)] // intentional
+fn get_far_call_arguments(abi: U256) -> FarCallABI {
+ let gas_to_pass = abi.0[3] as u32;
+ let settings = (abi.0[3] >> 32) as u32;
+ let [_, shard_id, constructor_call_byte, system_call_byte] = settings.to_le_bytes();
+
+ FarCallABI {
+ gas_to_pass,
+ shard_id,
+ is_constructor_call: constructor_call_byte != 0,
+ is_system_call: system_call_byte != 0,
+ }
+}
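+
+// A round-trip sketch (hypothetical helper mirroring the decoding above):
+// the highest 64-bit limb of the ABI word packs `gas_to_pass` in its low
+// half and the settings bytes (shard, constructor and system flags) in its
+// high half.
+#[cfg(test)]
+fn encode_far_call_abi(gas_to_pass: u32, shard_id: u8, constructor: bool, system: bool) -> U256 {
+ let settings = u32::from_le_bytes([0, shard_id, u8::from(constructor), u8::from(system)]);
+ let mut abi = U256::zero();
+ abi.0[3] = u64::from(gas_to_pass) | (u64::from(settings) << 32);
+ abi
+}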
+
+/// Forms a new fat pointer or narrows an existing one, as dictated by the ABI.
+///
+/// This function needs to be called even if we already know we will panic, because
+/// overflowing start + length makes the heap resize even when already panicking.
+pub(crate) fn get_far_call_calldata<T, W>(
+ raw_abi: U256,
+ is_pointer: bool,
+ vm: &mut VirtualMachine<T, W>,
+ already_failed: bool,
+) -> Option<FatPointer> {
+ let mut pointer = FatPointer::from(raw_abi);
+ #[allow(clippy::cast_possible_truncation)]
+ // intentional: the source is encoded in the lower byte of the extracted value
+ let raw_source = (raw_abi.0[3] >> 32) as u8;
+
+ match FatPointerSource::from_abi(raw_source) {
+ FatPointerSource::ForwardFatPointer => {
+ if !is_pointer || pointer.offset > pointer.length || already_failed {
+ return None;
+ }
+
+ pointer.narrow();
+ }
+ FatPointerSource::MakeNewPointer(target) => {
+ if let Some(bound) = pointer.start.checked_add(pointer.length) {
+ if is_pointer || pointer.offset != 0 || already_failed {
+ return None;
+ }
+ match target {
+ FatPointerTarget::ToHeap => {
+ grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
+ pointer.memory_page = vm.state.current_frame.heap;
+ }
+ FatPointerTarget::ToAuxHeap => {
+ grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
+ pointer.memory_page = vm.state.current_frame.aux_heap;
+ }
+ }
+ } else {
+ // The heap is grown even if the pointer goes out of the heap
+ // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
+ let bound = u32::MAX;
+ match target {
+ FatPointerTarget::ToHeap => {
+ grow_heap::<_, _, Heap>(&mut vm.state, bound).ok()?;
+ }
+ FatPointerTarget::ToAuxHeap => {
+ grow_heap::<_, _, AuxHeap>(&mut vm.state, bound).ok()?;
+ }
+ }
+ return None;
+ }
+ }
+ }
+
+ Some(pointer)
+}
+
+#[derive(Debug)]
+enum FatPointerSource {
+ MakeNewPointer(FatPointerTarget),
+ ForwardFatPointer,
+}
+
+#[derive(Debug)]
+enum FatPointerTarget {
+ ToHeap,
+ ToAuxHeap,
+}
+
+impl FatPointerSource {
+ const fn from_abi(value: u8) -> Self {
+ match value {
+ 1 => Self::ForwardFatPointer,
+ 2 => Self::MakeNewPointer(FatPointerTarget::ToAuxHeap),
+ _ => Self::MakeNewPointer(FatPointerTarget::ToHeap), // default
+ }
+ }
+}
+
+impl FatPointer {
+ fn narrow(&mut self) {
+ self.start += self.offset;
+ self.length -= self.offset;
+ self.offset = 0;
+ }
+}
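+
+// Worked example of `narrow` (illustrative values): a pointer with
+// start = 100, length = 50, offset = 10 becomes start = 110, length = 40,
+// offset = 0; the already-consumed prefix is cut off and the window is
+// re-anchored at the old offset.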
+
+impl<T: Tracer, W: World<T>> Instruction<T, W> {
+ /// Creates a [`FarCall`] instruction with the provided mode and params.
+ pub fn from_far_call<M: TypeLevelCallingMode>(
+ src1: Register1,
+ src2: Register2,
+ error_handler: Immediate1,
+ is_static: bool,
+ is_shard: bool,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: monomorphize!(far_call [T W M] match_boolean is_static match_boolean is_shard),
+ arguments: arguments
+ .write_source(&src1)
+ .write_source(&src2)
+ .write_source(&error_handler),
+ }
+ }
+}
+
+use primitive_types::U256;
+use zksync_vm2_interface::{opcodes, HeapId, OpcodeType, Tracer};
+
+use super::{
+ common::{boilerplate, full_boilerplate},
+ monomorphization::{match_boolean, match_reg_imm, monomorphize, parameterize},
+ ret::spontaneous_panic,
+};
+use crate::{
+ addressing_modes::{
+ Arguments, Destination, DestinationWriter, Immediate1, Register1, Register2,
+ RegisterOrImmediate, Source,
+ },
+ fat_pointer::FatPointer,
+ instruction::ExecutionStatus,
+ state::State,
+ ExecutionEnd, Instruction, VirtualMachine,
+};
+
+pub(crate) trait HeapFromState {
+ type Read: OpcodeType;
+ type Write: OpcodeType;
+
+ fn get_heap<T, W>(state: &State<T, W>) -> HeapId;
+ fn get_heap_size<T, W>(state: &mut State<T, W>) -> &mut u32;
+}
+
+pub(crate) struct Heap;
+
+impl HeapFromState for Heap {
+ type Read = opcodes::HeapRead;
+ type Write = opcodes::HeapWrite;
+
+ fn get_heap<T, W>(state: &State<T, W>) -> HeapId {
+ state.current_frame.heap
+ }
+
+ fn get_heap_size<T, W>(state: &mut State<T, W>) -> &mut u32 {
+ &mut state.current_frame.heap_size
+ }
+}
+
+pub(crate) struct AuxHeap;
+
+impl HeapFromState for AuxHeap {
+ type Read = opcodes::AuxHeapRead;
+ type Write = opcodes::AuxHeapWrite;
+
+ fn get_heap<T, W>(state: &State<T, W>) -> HeapId {
+ state.current_frame.aux_heap
+ }
+
+ fn get_heap_size<T, W>(state: &mut State<T, W>) -> &mut u32 {
+ &mut state.current_frame.aux_heap_size
+ }
+}
+
+/// The last address to which 32 can be added without overflow.
+const LAST_ADDRESS: u32 = u32::MAX - 32;
+
+// Necessary because the obvious code compiles to a comparison of two 256-bit numbers.
+#[inline(always)]
+fn bigger_than_last_address(x: U256) -> bool {
+ x.0[0] > LAST_ADDRESS.into() || x.0[1] != 0 || x.0[2] != 0 || x.0[3] != 0
+}
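+
+// Worked example (illustrative value): for x = 2^64, limb 0 is zero, so
+// `x.low_u32()` would read as 0, yet limb 1 is nonzero and the check still
+// rejects the address. Only 64-bit comparisons are performed.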
+
+fn load<T: Tracer, W, H: HeapFromState, In: Source, const INCREMENT: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<H::Read, _, _>(vm, world, tracer, |vm, args| {
+ // Pointers need not be masked here even though we do not care about them being pointers.
+ // They will panic, though, because they are larger than 2^32.
+ let (pointer, _) = In::get_with_pointer_flag(args, &mut vm.state);
+
+ let address = pointer.low_u32();
+
+ let new_bound = address.wrapping_add(32);
+ if grow_heap::<_, _, H>(&mut vm.state, new_bound).is_err() {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ };
+
+ // The heap is always grown even when the index is nonsensical.
+ // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
+ if bigger_than_last_address(pointer) {
+ let _ = vm.state.use_gas(u32::MAX);
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ }
+
+ let heap = H::get_heap(&vm.state);
+ let value = vm.state.heaps[heap].read_u256(address);
+ Register1::set(args, &mut vm.state, value);
+
+ if INCREMENT {
+ Register2::set(args, &mut vm.state, pointer + 32);
+ }
+ })
+}
+
+fn store<T, W, H, In, const INCREMENT: bool, const HOOKING_ENABLED: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus
+where
+ T: Tracer,
+ H: HeapFromState,
+ In: Source,
+{
+ full_boilerplate::<H::Write, _, _>(vm, world, tracer, |vm, args, _, _| {
+ // Pointers need not be masked here even though we do not care about them being pointers.
+ // They will panic, though, because they are larger than 2^32.
+ let (pointer, _) = In::get_with_pointer_flag(args, &mut vm.state);
+
+ let address = pointer.low_u32();
+ let value = Register2::get(args, &mut vm.state);
+
+ let new_bound = address.wrapping_add(32);
+ if grow_heap::<_, _, H>(&mut vm.state, new_bound).is_err() {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return ExecutionStatus::Running;
+ }
+
+ // The heap is always grown even when the index is nonsensical.
+ // TODO PLA-974 revert to not growing the heap on failure as soon as zk_evm is fixed
+ if bigger_than_last_address(pointer) {
+ let _ = vm.state.use_gas(u32::MAX);
+ vm.state.current_frame.pc = spontaneous_panic();
+ return ExecutionStatus::Running;
+ }
+
+ let heap = H::get_heap(&vm.state);
+ vm.state.heaps.write_u256(heap, address, value);
+
+ if INCREMENT {
+ Register1::set(args, &mut vm.state, pointer + 32);
+ }
+
+ if HOOKING_ENABLED && address == vm.settings.hook_address {
+ ExecutionStatus::Stopped(ExecutionEnd::SuspendedOnHook(value.as_u32()))
+ } else {
+ ExecutionStatus::Running
+ }
+ })
+}
+
+/// Pays for more heap space. Doesn't actually grow the heap.
+/// That distinction is necessary because the bootloader gets `u32::MAX` heap for free.
+pub(crate) fn grow_heap<T, W, H: HeapFromState>(
+ state: &mut State<T, W>,
+ new_bound: u32,
+) -> Result<(), ()> {
+ let already_paid = H::get_heap_size(state);
+ if *already_paid < new_bound {
+ let to_pay = new_bound - *already_paid;
+ *already_paid = new_bound;
+ state.use_gas(to_pay)?;
+ }
+
+ Ok(())
+}
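+
+// Worked example of the pay-once semantics (hypothetical numbers): a frame
+// that has already paid for a bound of 96 pays gas for the 32-byte difference
+// when growing to 128; any later growth to a bound of 128 or less is free.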
+
+fn load_pointer<T: Tracer, W, const INCREMENT: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::PointerRead, _, _>(vm, world, tracer, |vm, args| {
+ let (input, input_is_pointer) = Register1::get_with_pointer_flag(args, &mut vm.state);
+ if !input_is_pointer {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ }
+ let pointer = FatPointer::from(input);
+
+ // Usually, we just read zeroes instead of out-of-bounds bytes
+ // but if offset + 32 is not representable, we panic, even if we could've read some bytes.
+ // This is not a bug, this is how it must work to be backwards compatible.
+ if pointer.offset > LAST_ADDRESS {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ };
+
+ let start = pointer.start + pointer.offset.min(pointer.length);
+ let end = start.saturating_add(32).min(pointer.start + pointer.length);
+
+ let value = vm.state.heaps[pointer.memory_page].read_u256_partially(start..end);
+ Register1::set(args, &mut vm.state, value);
+
+ if INCREMENT {
+ // This addition does not overflow because we checked that the offset is small enough above.
+ Register2::set_fat_ptr(args, &mut vm.state, input + 32);
+ }
+ })
+}
+
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates a [`HeapRead`](opcodes::HeapRead) instruction with the provided params.
+ pub fn from_heap_read(
+ src: RegisterOrImmediate,
+ out: Register1,
+ incremented_out: Option<Register2>,
+ arguments: Arguments,
+ ) -> Self {
+ Self::from_read::<Heap>(src, out, incremented_out, arguments)
+ }
+
+ /// Creates an [`AuxHeapRead`](opcodes::AuxHeapRead) instruction with the provided params.
+ pub fn from_aux_heap_read(
+ src: RegisterOrImmediate,
+ out: Register1,
+ incremented_out: Option<Register2>,
+ arguments: Arguments,
+ ) -> Self {
+ Self::from_read::<AuxHeap>(src, out, incremented_out, arguments)
+ }
+
+ fn from_read<H: HeapFromState>(
+ src: RegisterOrImmediate,
+ out: Register1,
+ incremented_out: Option<Register2>,
+ arguments: Arguments,
+ ) -> Self {
+ let mut arguments = arguments.write_source(&src).write_destination(&out);
+
+ let increment = incremented_out.is_some();
+ if let Some(out2) = incremented_out {
+ out2.write_destination(&mut arguments);
+ }
+
+ Self {
+ handler: monomorphize!(load [T W H] match_reg_imm src match_boolean increment),
+ arguments,
+ }
+ }
+
+ /// Creates a [`HeapWrite`](opcodes::HeapWrite) instruction with the provided params.
+ pub fn from_heap_write(
+ src1: RegisterOrImmediate,
+ src2: Register2,
+ incremented_out: Option<Register1>,
+ arguments: Arguments,
+ should_hook: bool,
+ ) -> Self {
+ Self::from_write::<Heap>(src1, src2, incremented_out, arguments, should_hook)
+ }
+
+ /// Creates an [`AuxHeapWrite`](opcodes::AuxHeapWrite) instruction with the provided params.
+ pub fn from_aux_heap_store(
+ src1: RegisterOrImmediate,
+ src2: Register2,
+ incremented_out: Option<Register1>,
+ arguments: Arguments,
+ ) -> Self {
+ Self::from_write::<AuxHeap>(src1, src2, incremented_out, arguments, false)
+ }
+
+ fn from_write<H: HeapFromState>(
+ src1: RegisterOrImmediate,
+ src2: Register2,
+ incremented_out: Option<Register1>,
+ arguments: Arguments,
+ should_hook: bool,
+ ) -> Self {
+ let increment = incremented_out.is_some();
+ Self {
+ handler: monomorphize!(store [T W H] match_reg_imm src1 match_boolean increment match_boolean should_hook),
+ arguments: arguments
+ .write_source(&src1)
+ .write_source(&src2)
+ .write_destination(&incremented_out),
+ }
+ }
+
+ /// Creates a [`PointerRead`](opcodes::PointerRead) instruction with the provided params.
+ pub fn from_pointer_read(
+ src: Register1,
+ out: Register1,
+ incremented_out: Option<Register2>,
+ arguments: Arguments,
+ ) -> Self {
+ let increment = incremented_out.is_some();
+ Self {
+ handler: monomorphize!(load_pointer [T W] match_boolean increment),
+ arguments: arguments
+ .write_source(&src)
+ .write_destination(&out)
+ .write_destination(&incremented_out),
+ }
+ }
+}
+
+use zksync_vm2_interface::{opcodes, Tracer};
+
+use super::{
+ common::boilerplate,
+ monomorphization::{match_source, monomorphize, parameterize},
+};
+use crate::{
+ addressing_modes::{
+ AbsoluteStack, AdvanceStackPointer, AnySource, Arguments, CodePage, Destination,
+ Immediate1, Register1, RelativeStack, Source,
+ },
+ instruction::{ExecutionStatus, Instruction},
+ VirtualMachine,
+};
+
+fn jump<T: Tracer, W, In: Source>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::Jump, _, _>(vm, world, tracer, |vm, args| {
+ #[allow(clippy::cast_possible_truncation)] // intentional
+ let target = In::get(args, &mut vm.state).low_u32() as u16;
+
+ let next_instruction = vm.state.current_frame.get_pc_as_u16();
+ Register1::set(args, &mut vm.state, next_instruction.into());
+
+ vm.state.current_frame.set_pc_from_u16(target);
+ })
+}
+
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates a [`Jump`](opcodes::Jump) instruction with the provided params.
+ pub fn from_jump(source: AnySource, destination: Register1, arguments: Arguments) -> Self {
+ Self {
+ handler: monomorphize!(jump [T W] match_source source),
+ arguments: arguments
+ .write_source(&source)
+ .write_destination(&destination),
+ }
+ }
+}
+
#[cfg(feature = "single_instruction_test")]
+pub(crate) use ret::spontaneous_panic;
+
+pub(crate) use self::{
+ context::address_into_u256,
+ heap_access::{AuxHeap, Heap},
+ ret::invalid_instruction,
+};
+
+mod binop;
+mod common;
+mod context;
+mod decommit;
+mod event;
+mod far_call;
+mod heap_access;
+mod jump;
+mod monomorphization;
+mod near_call;
+mod nop;
+mod pointer;
+mod precompiles;
+mod ret;
+mod storage;
+
+/// Selects an instantiation of a generic function based on runtime variables.
+/// For example, `monomorphize!(load [H] match_reg_imm src match_boolean increment)`
+macro_rules! monomorphize {
+ ($function_name: ident [$($types: tt)*] $next_matcher: ident $($rest: ident)*) => {
+ $next_matcher!([$($types)*] $($rest)* parameterize $function_name)
+ };
+
+ ($function_name: ident $($rest: ident)*) => {
+ monomorphize!($function_name [] $($rest)*)
+ };
+}
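+
+// Expansion sketch for a call site such as the one in `heap_access`:
+// `monomorphize!(load [T W H] match_reg_imm src match_boolean increment)`
+// first dispatches on `src`, appending `Register1` or `Immediate1` to the
+// type list, then on `increment`, appending `{true}` or `{false}`; finally
+// `parameterize` emits a concrete item such as `load::<T, W, H, Register1, {true}>`.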
+
+macro_rules! match_source {
+ ([ $($types: tt)* ] $input_type: ident $next_matcher: ident $($rest: ident)*) => {
+ match $input_type {
+ AnySource::Register1(_) => $next_matcher!([$($types)* Register1] $($rest)*),
+ AnySource::Immediate1(_) => $next_matcher!([$($types)* Immediate1] $($rest)*),
+ AnySource::AbsoluteStack(_) => $next_matcher!([$($types)* AbsoluteStack] $($rest)*),
+ AnySource::RelativeStack(_) => $next_matcher!([$($types)* RelativeStack] $($rest)*),
+ AnySource::AdvanceStackPointer(_) => $next_matcher!([$($types)* AdvanceStackPointer] $($rest)*),
+ AnySource::CodePage(_) => $next_matcher!([$($types)* CodePage] $($rest)*),
+ }
+ };
+}
+
+macro_rules! match_reg_imm {
+ ([ $($types: tt)* ] $input_type: ident $next_matcher: ident $($rest: ident)*) => {
+ match $input_type {
+ RegisterOrImmediate::Register1(_) => $next_matcher!([$($types)* Register1] $($rest)*),
+ RegisterOrImmediate::Immediate1(_) => $next_matcher!([$($types)* Immediate1] $($rest)*),
+ }
+ };
+}
+
+macro_rules! match_destination {
+ ([ $($types: tt)* ] $input_type: ident $next_matcher: ident $($rest: ident)*) => {
+ match $input_type {
+ AnyDestination::Register1(_) => $next_matcher!([$($types)* Register1] $($rest)*),
+ AnyDestination::AbsoluteStack(_) => $next_matcher!([$($types)* AbsoluteStack] $($rest)*),
+ AnyDestination::RelativeStack(_) => $next_matcher!([$($types)* RelativeStack] $($rest)*),
+ AnyDestination::AdvanceStackPointer(_) => $next_matcher!([$($types)* AdvanceStackPointer] $($rest)*),
+ }
+ };
+}
+
+macro_rules! match_boolean {
+ ([ $($types: tt)* ] $increment: ident $next_matcher: ident $($rest: ident)*) => {
+ if $increment {
+ $next_matcher!([$($types)* {true}] $($rest)*)
+ } else {
+ $next_matcher!([$($types)* {false}] $($rest)*)
+ }
+ };
+}
+
+macro_rules! parameterize {
+ ([$($types: tt)*] $function_name:ident) => {
+ $function_name::<$($types),*>
+ };
+}
+
+pub(crate) use match_boolean;
+pub(crate) use match_destination;
+pub(crate) use match_reg_imm;
+pub(crate) use match_source;
+pub(crate) use monomorphize;
+pub(crate) use parameterize;
+
+use zksync_vm2_interface::{opcodes, Tracer};
+
+use super::common::boilerplate;
+use crate::{
+ addressing_modes::{Arguments, Immediate1, Immediate2, Register1, Source},
+ instruction::ExecutionStatus,
+ predication::Flags,
+ Instruction, VirtualMachine,
+};
+
+fn near_call<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::NearCall, _, _>(vm, world, tracer, |vm, args| {
+ let gas_to_pass = Register1::get(args, &mut vm.state).low_u32();
+ let destination = Immediate1::get_u16(args);
+ let error_handler = Immediate2::get_u16(args);
+
+ let new_frame_gas = if gas_to_pass == 0 {
+ vm.state.current_frame.gas
+ } else {
+ gas_to_pass.min(vm.state.current_frame.gas)
+ };
+ vm.state.current_frame.push_near_call(
+ new_frame_gas,
+ error_handler,
+ vm.world_diff.snapshot(),
+ );
+
+ vm.state.flags = Flags::new(false, false, false);
+
+ vm.state.current_frame.set_pc_from_u16(destination);
+ })
+}
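+
+// Worked example (hypothetical numbers): with 1_000 gas in the current frame,
+// `gas_to_pass = 0` forwards all 1_000, while `gas_to_pass = 5_000` is capped
+// to 1_000. Unlike far calls, there is no 63/64 reservation for the caller.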
+
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates a [`NearCall`](opcodes::NearCall) instruction with the provided params.
+ pub fn from_near_call(
+ gas: Register1,
+ destination: Immediate1,
+ error_handler: Immediate2,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: near_call,
+ arguments: arguments
+ .write_source(&gas)
+ .write_source(&destination)
+ .write_source(&error_handler),
+ }
+ }
+}
+
+use zksync_vm2_interface::{opcodes, Tracer};
+
+use super::common::boilerplate;
+use crate::{
+ addressing_modes::{destination_stack_address, AdvanceStackPointer, Arguments, Source},
+ instruction::ExecutionStatus,
+ Instruction, VirtualMachine,
+};
+
+fn nop<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::Nop, _, _>(vm, world, tracer, |vm, args| {
+ // nop's addressing modes can move the stack pointer!
+ AdvanceStackPointer::get(args, &mut vm.state);
+ vm.state.current_frame.sp = vm
+ .state
+ .current_frame
+ .sp
+ .wrapping_add(destination_stack_address(args, &mut vm.state));
+ })
+}
+
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates a [`Nop`](opcodes::Nop) instruction with the provided params.
+ pub fn from_nop(
+ pop: AdvanceStackPointer,
+ push: AdvanceStackPointer,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: nop,
+ arguments: arguments.write_source(&pop).write_destination(&push),
+ }
+ }
+}
+
+use primitive_types::U256;
+use zksync_vm2_interface::{
+ opcodes::{PointerAdd, PointerPack, PointerShrink, PointerSub},
+ OpcodeType, Tracer,
+};
+
+use super::{
+ common::boilerplate,
+ monomorphization::{
+ match_boolean, match_destination, match_source, monomorphize, parameterize,
+ },
+ ret::spontaneous_panic,
+};
+use crate::{
+ addressing_modes::{
+ AbsoluteStack, AdvanceStackPointer, AnyDestination, AnySource, Arguments, CodePage,
+ Destination, Immediate1, Register1, Register2, RelativeStack, Source,
+ },
+ fat_pointer::FatPointer,
+ instruction::ExecutionStatus,
+ Instruction, VirtualMachine,
+};
+
+fn ptr<T: Tracer, W, Op: PtrOp, In1: Source, Out: Destination, const SWAP: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<Op, _, _>(vm, world, tracer, |vm, args| {
+ let ((a, a_is_pointer), (b, b_is_pointer)) = if SWAP {
+ (
+ Register2::get_with_pointer_flag(args, &mut vm.state),
+ In1::get_with_pointer_flag_and_erasing(args, &mut vm.state),
+ )
+ } else {
+ (
+ In1::get_with_pointer_flag(args, &mut vm.state),
+ Register2::get_with_pointer_flag_and_erasing(args, &mut vm.state),
+ )
+ };
+
+ if !a_is_pointer || b_is_pointer {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ }
+
+ let Some(result) = Op::perform(a, b) else {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ };
+
+ Out::set_fat_ptr(args, &mut vm.state, result);
+ })
+}
+
+pub(crate) trait PtrOp: OpcodeType {
+ fn perform(in1: U256, in2: U256) -> Option<U256>;
+}
+
+impl PtrOp for PointerAdd {
+ #[inline(always)]
+ fn perform(in1: U256, in2: U256) -> Option<U256> {
+ ptr_add_sub::<true>(in1, in2)
+ }
+}
+
+impl PtrOp for PointerSub {
+ #[inline(always)]
+ fn perform(in1: U256, in2: U256) -> Option<U256> {
+ ptr_add_sub::<false>(in1, in2)
+ }
+}
+
+fn ptr_add_sub<const IS_ADD: bool>(mut in1: U256, in2: U256) -> Option<U256> {
+ if in2 > u32::MAX.into() {
+ return None;
+ }
+ let pointer: &mut FatPointer = (&mut in1).into();
+
+ let new_offset = if IS_ADD {
+ pointer.offset.checked_add(in2.low_u32())
+ } else {
+ pointer.offset.checked_sub(in2.low_u32())
+ }?;
+
+ pointer.offset = new_offset;
+
+ Some(in1)
+}
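+
+// Worked example (illustrative values): `in2 = 1 << 32` fails the range guard
+// in either variant, and subtracting 5 from a pointer whose offset is 3 fails
+// `checked_sub`; in both cases the caller turns `None` into a panic.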
+
+impl PtrOp for PointerPack {
+ #[inline(always)]
+ fn perform(in1: U256, in2: U256) -> Option<U256> {
+ if in2.low_u128() != 0 {
+ None
+ } else {
+ Some(U256([in1.0[0], in1.0[1], in2.0[2], in2.0[3]]))
+ }
+ }
+}
+
+impl PtrOp for PointerShrink {
+ #[inline(always)]
+ fn perform(mut in1: U256, in2: U256) -> Option<U256> {
+ let pointer: &mut FatPointer = (&mut in1).into();
+ pointer.length = pointer.length.checked_sub(in2.low_u32())?;
+ Some(in1)
+ }
+}
+
+macro_rules! from_ptr_op {
+ ($name:ident <$binop:ty>) => {
+ #[doc = concat!("Creates a [`", stringify!($binop), "`] instruction with the provided params.")]
+ pub fn $name(
+ src1: AnySource,
+ src2: Register2,
+ out: AnyDestination,
+ arguments: Arguments,
+ swap: bool,
+ ) -> Self {
+ Self::from_ptr::<$binop>(src1, src2, out, arguments, swap)
+ }
+ };
+}
+
+/// Pointer-related instructions.
+impl<T: Tracer, W> Instruction<T, W> {
+ from_ptr_op!(from_pointer_add<PointerAdd>);
+ from_ptr_op!(from_pointer_sub<PointerSub>);
+ from_ptr_op!(from_pointer_pack<PointerPack>);
+ from_ptr_op!(from_pointer_shrink<PointerShrink>);
+
+ pub(crate) fn from_ptr<Op: PtrOp>(
+ src1: AnySource,
+ src2: Register2,
+ out: AnyDestination,
+ arguments: Arguments,
+ swap: bool,
+ ) -> Self {
+ Self {
+ handler: monomorphize!(ptr [T W Op] match_source src1 match_destination out match_boolean swap),
+ arguments: arguments
+ .write_source(&src1)
+ .write_source(&src2)
+ .write_destination(&out),
+ }
+ }
+}
+
+use primitive_types::{H160, U256};
+use zk_evm_abstractions::{
+ aux::Timestamp,
+ precompiles::{
+ ecrecover::ecrecover_function, keccak256::keccak256_rounds_function,
+ secp256r1_verify::secp256r1_verify_function, sha256::sha256_rounds_function,
+ },
+ queries::LogQuery,
+ vm::Memory,
+};
+use zkevm_opcode_defs::{
+ system_params::{
+ ECRECOVER_INNER_FUNCTION_PRECOMPILE_ADDRESS, KECCAK256_ROUND_FUNCTION_PRECOMPILE_ADDRESS,
+ SECP256R1_VERIFY_PRECOMPILE_ADDRESS, SHA256_ROUND_FUNCTION_PRECOMPILE_ADDRESS,
+ },
+ PrecompileAuxData, PrecompileCallABI,
+};
+use zksync_vm2_interface::{opcodes, CycleStats, HeapId, Tracer};
+
+use super::{common::boilerplate_ext, ret::spontaneous_panic};
+use crate::{
+ addressing_modes::{Arguments, Destination, Register1, Register2, Source},
+ heap::Heaps,
+ instruction::ExecutionStatus,
+ Instruction, VirtualMachine,
+};
+
+fn precompile_call<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate_ext::<opcodes::PrecompileCall, _, _>(vm, world, tracer, |vm, args, _, tracer| {
+ // The user gets to decide how much gas to burn
+ // This is safe because system contracts are trusted
+ let aux_data = PrecompileAuxData::from_u256(Register2::get(args, &mut vm.state));
+ let Ok(()) = vm.state.use_gas(aux_data.extra_ergs_cost) else {
+ vm.state.current_frame.pc = spontaneous_panic();
+ return;
+ };
+
+ #[allow(clippy::cast_possible_wrap)]
+ {
+ vm.world_diff.pubdata.0 += aux_data.extra_pubdata_cost as i32;
+ }
+
+ let mut abi = PrecompileCallABI::from_u256(Register1::get(args, &mut vm.state));
+ if abi.memory_page_to_read == 0 {
+ abi.memory_page_to_read = vm.state.current_frame.heap.as_u32();
+ }
+ if abi.memory_page_to_write == 0 {
+ abi.memory_page_to_write = vm.state.current_frame.heap.as_u32();
+ }
+
+ let query = LogQuery {
+ timestamp: Timestamp(0),
+ key: abi.to_u256(),
+ // only the first two fields are read by the precompile
+ tx_number_in_block: Default::default(),
+ aux_byte: Default::default(),
+ shard_id: Default::default(),
+ address: H160::default(),
+ read_value: U256::default(),
+ written_value: U256::default(),
+ rw_flag: Default::default(),
+ rollback: Default::default(),
+ is_service: Default::default(),
+ };
+
+ let address_bytes = vm.state.current_frame.address.0;
+ let address_low = u16::from_le_bytes([address_bytes[19], address_bytes[18]]);
+ let heaps = &mut vm.state.heaps;
+
+ #[allow(clippy::cast_possible_truncation)]
+ // if we're having `> u32::MAX` cycles, we've got larger issues
+ match address_low {
+ KECCAK256_ROUND_FUNCTION_PRECOMPILE_ADDRESS => {
+ tracer.on_extra_prover_cycles(CycleStats::Keccak256(
+ keccak256_rounds_function::<_, false>(0, query, heaps).0 as u32,
+ ));
+ }
+ SHA256_ROUND_FUNCTION_PRECOMPILE_ADDRESS => {
+ tracer.on_extra_prover_cycles(CycleStats::Sha256(
+ sha256_rounds_function::<_, false>(0, query, heaps).0 as u32,
+ ));
+ }
+ ECRECOVER_INNER_FUNCTION_PRECOMPILE_ADDRESS => {
+ tracer.on_extra_prover_cycles(CycleStats::EcRecover(
+ ecrecover_function::<_, false>(0, query, heaps).0 as u32,
+ ));
+ }
+ SECP256R1_VERIFY_PRECOMPILE_ADDRESS => {
+ tracer.on_extra_prover_cycles(CycleStats::Secp256r1Verify(
+ secp256r1_verify_function::<_, false>(0, query, heaps).0 as u32,
+ ));
+ }
+ _ => {
+ // A precompile call may be used just to burn gas
+ }
+ }
+
+ Register1::set(args, &mut vm.state, 1.into());
+ })
+}
+
+impl Memory for Heaps {
+ fn execute_partial_query(
+ &mut self,
+ _monotonic_cycle_counter: u32,
+ mut query: zk_evm_abstractions::queries::MemoryQuery,
+ ) -> zk_evm_abstractions::queries::MemoryQuery {
+ let page = HeapId::from_u32_unchecked(query.location.page.0);
+
+ let start = query.location.index.0 * 32;
+ if query.rw_flag {
+ self.write_u256(page, start, query.value);
+ } else {
+ query.value = self[page].read_u256(start);
+ query.value_is_pointer = false;
+ }
+ query
+ }
+
+ fn specialized_code_query(
+ &mut self,
+ _monotonic_cycle_counter: u32,
+ _query: zk_evm_abstractions::queries::MemoryQuery,
+ ) -> zk_evm_abstractions::queries::MemoryQuery {
+ todo!()
+ }
+
+ fn read_code_query(
+ &self,
+ _monotonic_cycle_counter: u32,
+ _query: zk_evm_abstractions::queries::MemoryQuery,
+ ) -> zk_evm_abstractions::queries::MemoryQuery {
+ todo!()
+ }
+}
+
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates a [`PrecompileCall`](opcodes::PrecompileCall) instruction with the provided params.
+ pub fn from_precompile_call(
+ abi: Register1,
+ burn: Register2,
+ out: Register1,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ arguments: arguments
+ .write_source(&abi)
+ .write_source(&burn)
+ .write_destination(&out),
+ handler: precompile_call,
+ }
+ }
+}
+
+use primitive_types::U256;
+use zksync_vm2_interface::{
+ opcodes::{self, Normal, Panic, Revert, TypeLevelReturnType},
+ ReturnType, Tracer,
+};
+
+use super::{
+ common::full_boilerplate,
+ far_call::get_far_call_calldata,
+ monomorphization::{match_boolean, monomorphize, parameterize},
+};
+use crate::{
+ addressing_modes::{Arguments, Immediate1, Register1, Source, INVALID_INSTRUCTION_COST},
+ callframe::FrameRemnant,
+ instruction::{ExecutionEnd, ExecutionStatus},
+ mode_requirements::ModeRequirements,
+ predication::Flags,
+ Instruction, Predicate, VirtualMachine,
+};
+
+fn naked_ret<T: Tracer, W, RT: TypeLevelReturnType, const TO_LABEL: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ args: &Arguments,
+) -> ExecutionStatus {
+ let mut return_type = RT::VALUE;
+ let near_call_leftover_gas = vm.state.current_frame.gas;
+
+ let (snapshot, leftover_gas) = if let Some(FrameRemnant {
+ exception_handler,
+ snapshot,
+ }) = vm.state.current_frame.pop_near_call()
+ {
+ if TO_LABEL {
+ let pc = Immediate1::get_u16(args);
+ vm.state.current_frame.set_pc_from_u16(pc);
+ } else if return_type.is_failure() {
+ vm.state.current_frame.set_pc_from_u16(exception_handler);
+ }
+
+ (snapshot, near_call_leftover_gas)
+ } else {
+ let return_value_or_panic = if return_type == ReturnType::Panic {
+ None
+ } else {
+ let (raw_abi, is_pointer) = Register1::get_with_pointer_flag(args, &mut vm.state);
+ let result = get_far_call_calldata(raw_abi, is_pointer, vm, false).filter(|pointer| {
+ vm.state.current_frame.is_kernel
+ || pointer.memory_page != vm.state.current_frame.calldata_heap
+ });
+
+ if result.is_none() {
+ return_type = ReturnType::Panic;
+ }
+ result
+ };
+
+ let leftover_gas = vm
+ .state
+ .current_frame
+ .gas
+ .saturating_sub(vm.state.current_frame.stipend);
+
+ let Some(FrameRemnant {
+ exception_handler,
+ snapshot,
+ }) = vm.pop_frame(
+ return_value_or_panic
+ .as_ref()
+ .map(|pointer| pointer.memory_page),
+ )
+ else {
+ // The initial frame is not rolled back, even if it fails.
+ // It is the caller's job to clean up when the execution as a whole fails because
+ // the caller may take external snapshots while the VM is in the initial frame and
+ // these would break were the initial frame to be rolled back.
+
+ // But to continue execution would be nonsensical and can cause UB because there
+ // is no next instruction after a panic arising from some other instruction.
+ vm.state.current_frame.pc = invalid_instruction();
+
+ return if let Some(return_value) = return_value_or_panic {
+ let output = vm.state.heaps[return_value.memory_page]
+ .read_range_big_endian(
+ return_value.start..return_value.start + return_value.length,
+ )
+ .clone();
+ if return_type == ReturnType::Revert {
+ ExecutionStatus::Stopped(ExecutionEnd::Reverted(output))
+ } else {
+ ExecutionStatus::Stopped(ExecutionEnd::ProgramFinished(output))
+ }
+ } else {
+ ExecutionStatus::Stopped(ExecutionEnd::Panicked)
+ };
+ };
+
+ vm.state.set_context_u128(0);
+ vm.state.registers = [U256::zero(); 16];
+
+ if let Some(return_value) = return_value_or_panic {
+ vm.state.registers[1] = return_value.into_u256();
+ }
+ vm.state.register_pointer_flags = 2;
+
+ if return_type.is_failure() {
+ vm.state.current_frame.set_pc_from_u16(exception_handler);
+ }
+
+ (snapshot, leftover_gas)
+ };
+
+ if return_type.is_failure() {
+ vm.world_diff.rollback(snapshot);
+ }
+
+ vm.state.flags = Flags::new(return_type == ReturnType::Panic, false, false);
+ vm.state.current_frame.gas += leftover_gas;
+
+ ExecutionStatus::Running
+}
+
+fn ret<T: Tracer, W, RT: TypeLevelReturnType, const TO_LABEL: bool>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ full_boilerplate::<opcodes::Ret<RT>, _, _>(vm, world, tracer, |vm, args, _, _| {
+ naked_ret::<T, W, RT, TO_LABEL>(vm, args)
+ })
+}
+
+/// Turn the current instruction into a panic at no extra cost. (Great value, I know.)
+///
+/// Call this when:
+/// - gas runs out when paying for the fixed cost of an instruction
+/// - causing side effects in a static context
+/// - using privileged instructions while not in a system call
+/// - the far call stack overflows
+///
+/// For all other panics, point the instruction pointer at [PANIC] instead.
+pub(crate) fn free_panic<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ tracer.before_instruction::<opcodes::Ret<Panic>, _>(vm);
+ // args aren't used for panics unless TO_LABEL
+ let result = naked_ret::<T, W, Panic, false>(
+ vm,
+ &Arguments::new(Predicate::Always, 0, ModeRequirements::none()),
+ );
+ tracer.after_instruction::<opcodes::Ret<Panic>, _>(vm);
+ result
+}
+
+/// Formally, a far call pushes a new frame and returns from it immediately if it panics.
+/// This function instead panics without popping a frame to save on allocation.
+pub(crate) fn panic_from_failed_far_call<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ tracer: &mut T,
+ exception_handler: u16,
+) {
+ tracer.before_instruction::<opcodes::Ret<Panic>, _>(vm);
+
+ // Gas is already subtracted in the far call code.
+ // No need to roll back, as no changes are made in this "frame".
+ vm.state.set_context_u128(0);
+ vm.state.registers = [U256::zero(); 16];
+ vm.state.register_pointer_flags = 2;
+ vm.state.flags = Flags::new(true, false, false);
+ vm.state.current_frame.set_pc_from_u16(exception_handler);
+
+ tracer.after_instruction::<opcodes::Ret<Panic>, _>(vm);
+}
+
+fn invalid<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ _: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ vm.state.current_frame.gas = 0;
+ free_panic(vm, tracer)
+}
+
+trait GenericStatics<T, W> {
+ const PANIC: Instruction<T, W>;
+ const INVALID: Instruction<T, W>;
+}
+
+impl<T: Tracer, W> GenericStatics<T, W> for () {
+ const PANIC: Instruction<T, W> = Instruction {
+ handler: ret::<T, W, Panic, false>,
+ arguments: Arguments::new(Predicate::Always, RETURN_COST, ModeRequirements::none()),
+ };
+ const INVALID: Instruction<T, W> = Instruction::from_invalid();
+}
+
+// The following functions return references that live for 'static.
+// They aren't marked as such because returning any lifetime is more ergonomic.
+
+/// Point the program counter at this instruction when a panic occurs during the logic of an instruction.
+pub(crate) fn spontaneous_panic<'a, T: Tracer, W>() -> &'a Instruction<T, W> {
+ &<()>::PANIC
+}
+
+/// Panics, burning all available gas.
+pub(crate) fn invalid_instruction<'a, T: Tracer, W>() -> &'a Instruction<T, W> {
+ &<()>::INVALID
+}
+
+pub(crate) const RETURN_COST: u32 = 5;
+
+/// Variations of [`Ret`](opcodes::Ret) instructions.
+impl<T: Tracer, W> Instruction<T, W> {
+ /// Creates a normal [`Ret`](opcodes::Ret) instruction with the provided params.
+ pub fn from_ret(src1: Register1, label: Option<Immediate1>, arguments: Arguments) -> Self {
+ let to_label = label.is_some();
+ Self {
+ handler: monomorphize!(ret [T W Normal] match_boolean to_label),
+ arguments: arguments.write_source(&src1).write_source(&label),
+ }
+ }
+
+ /// Creates a revert [`Ret`](opcodes::Ret) instruction with the provided params.
+ pub fn from_revert(src1: Register1, label: Option<Immediate1>, arguments: Arguments) -> Self {
+ let to_label = label.is_some();
+ Self {
+ handler: monomorphize!(ret [T W Revert] match_boolean to_label),
+ arguments: arguments.write_source(&src1).write_source(&label),
+ }
+ }
+
+ /// Creates a panic [`Ret`](opcodes::Ret) instruction with the provided params.
+ pub fn from_panic(label: Option<Immediate1>, arguments: Arguments) -> Self {
+ let to_label = label.is_some();
+ Self {
+ handler: monomorphize!(ret [T W Panic] match_boolean to_label),
+ arguments: arguments.write_source(&label),
+ }
+ }
+
+ /// Creates an *invalid* instruction that will panic by draining all gas.
+ pub const fn from_invalid() -> Self {
+ Self {
+ handler: invalid,
+ arguments: Arguments::new(
+ Predicate::Always,
+ INVALID_INSTRUCTION_COST,
+ ModeRequirements::none(),
+ ),
+ }
+ }
+}
+
+use zksync_vm2_interface::{opcodes, Tracer};
+
+use super::common::{boilerplate, boilerplate_ext};
+use crate::{
+ addressing_modes::{
+ Arguments, Destination, Register1, Register2, Source, SLOAD_COST, SSTORE_COST,
+ },
+ instruction::ExecutionStatus,
+ Instruction, VirtualMachine, World,
+};
+
+fn sstore<T: Tracer, W: World<T>>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate_ext::<opcodes::StorageWrite, _, _>(vm, world, tracer, |vm, args, world, tracer| {
+ let key = Register1::get(args, &mut vm.state);
+ let value = Register2::get(args, &mut vm.state);
+
+ let refund =
+ vm.world_diff
+ .write_storage(world, tracer, vm.state.current_frame.address, key, value);
+
+ assert!(refund <= SSTORE_COST);
+ vm.state.current_frame.gas += refund;
+ })
+}
+
+fn sstore_transient<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::TransientStorageWrite, _, _>(vm, world, tracer, |vm, args| {
+ let key = Register1::get(args, &mut vm.state);
+ let value = Register2::get(args, &mut vm.state);
+
+ vm.world_diff
+ .write_transient_storage(vm.state.current_frame.address, key, value);
+ })
+}
+
+fn sload<T: Tracer, W: World<T>>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate_ext::<opcodes::StorageRead, _, _>(vm, world, tracer, |vm, args, world, tracer| {
+ let key = Register1::get(args, &mut vm.state);
+
+ let (value, refund) =
+ vm.world_diff
+ .read_storage(world, tracer, vm.state.current_frame.address, key);
+
+ assert!(refund <= SLOAD_COST);
+ vm.state.current_frame.gas += refund;
+
+ Register1::set(args, &mut vm.state, value);
+ })
+}
+
+fn sload_transient<T: Tracer, W>(
+ vm: &mut VirtualMachine<T, W>,
+ world: &mut W,
+ tracer: &mut T,
+) -> ExecutionStatus {
+ boilerplate::<opcodes::TransientStorageRead, _, _>(vm, world, tracer, |vm, args| {
+ let key = Register1::get(args, &mut vm.state);
+ let value = vm
+ .world_diff
+ .read_transient_storage(vm.state.current_frame.address, key);
+
+ Register1::set(args, &mut vm.state, value);
+ })
+}
+
+impl<T: Tracer, W: World<T>> Instruction<T, W> {
+ /// Creates a [`StorageWrite`](opcodes::StorageWrite) instruction with the provided params.
+ pub fn from_storage_write(src1: Register1, src2: Register2, arguments: Arguments) -> Self {
+ Self {
+ handler: sstore,
+ arguments: arguments.write_source(&src1).write_source(&src2),
+ }
+ }
+
+ /// Creates a [`TransientStorageWrite`](opcodes::TransientStorageWrite) instruction with the provided params.
+ pub fn from_transient_storage_write(
+ src1: Register1,
+ src2: Register2,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: sstore_transient,
+ arguments: arguments.write_source(&src1).write_source(&src2),
+ }
+ }
+
+ /// Creates a [`StorageRead`](opcodes::StorageRead) instruction with the provided params.
+ pub fn from_storage_read(src: Register1, dst: Register1, arguments: Arguments) -> Self {
+ Self {
+ handler: sload,
+ arguments: arguments.write_source(&src).write_destination(&dst),
+ }
+ }
+
+ /// Creates a [`TransientStorageRead`](opcodes::TransientStorageRead) instruction with the provided params.
+ pub fn from_transient_storage_read(
+ src: Register1,
+ dst: Register1,
+ arguments: Arguments,
+ ) -> Self {
+ Self {
+ handler: sload_transient,
+ arguments: arguments.write_source(&src).write_destination(&dst),
+ }
+ }
+}
+
+//! # High-Performance ZKsync Era VM
+//!
+//! This crate provides a high-performance [`VirtualMachine`] for ZKsync Era.
+
+use std::hash::{DefaultHasher, Hash, Hasher};
+
+use primitive_types::{H160, U256};
+pub use zksync_vm2_interface as interface;
+use zksync_vm2_interface::Tracer;
+
+// Re-export missing modules if single instruction testing is enabled
+#[cfg(feature = "single_instruction_test")]
+pub(crate) use self::single_instruction_test::{heap, program, stack};
+pub use self::{
+ fat_pointer::FatPointer,
+ instruction::{ExecutionEnd, Instruction},
+ mode_requirements::ModeRequirements,
+ predication::Predicate,
+ program::Program,
+ vm::{Settings, VirtualMachine},
+ world_diff::{Snapshot, StorageChange, WorldDiff},
+};
+
+pub mod addressing_modes;
+#[cfg(not(feature = "single_instruction_test"))]
+mod bitset;
+mod callframe;
+mod decode;
+mod decommit;
+mod fat_pointer;
+#[cfg(not(feature = "single_instruction_test"))]
+mod heap;
+mod instruction;
+mod instruction_handlers;
+mod mode_requirements;
+mod predication;
+#[cfg(not(feature = "single_instruction_test"))]
+mod program;
+mod rollback;
+#[cfg(feature = "single_instruction_test")]
+pub mod single_instruction_test;
+#[cfg(not(feature = "single_instruction_test"))]
+mod stack;
+mod state;
+pub mod testonly;
+#[cfg(all(test, not(feature = "single_instruction_test")))]
+mod tests;
+mod tracing;
+mod vm;
+mod world_diff;
+
+/// VM storage access operations.
+pub trait StorageInterface {
+ /// Reads the specified slot from the storage.
+ ///
+ /// There is no write counterpart; [`WorldDiff::get_storage_changes()`] gives a list of all storage changes.
+ fn read_storage(&mut self, contract: H160, key: U256) -> Option<U256>;
+
+ /// Computes the cost of writing a storage slot.
+ fn cost_of_writing_storage(&mut self, initial_value: Option<U256>, new_value: U256) -> u32;
+
+ /// Returns whether the storage slot is free, both in terms of gas and pubdata.
+ fn is_free_storage_slot(&self, contract: &H160, key: &U256) -> bool;
+}
+
+/// Encapsulates VM interaction with the external world. This includes VM storage and decommitting (loading) bytecodes
+/// for execution.
+pub trait World<T: Tracer>: StorageInterface + Sized {
+ /// Loads a bytecode with the specified hash.
+ ///
+ /// This method will be called *every* time a contract is called. Caching and decoding are
+ /// the world implementor's job.
+ fn decommit(&mut self, hash: U256) -> Program<T, Self>;
+
+ /// Loads bytecode bytes for the `decommit` opcode.
+ fn decommit_code(&mut self, hash: U256) -> Vec<u8>;
+}
+
+/// Deterministic (across program runs and machines) hash that can be used for `Debug` implementations
+/// to concisely represent large amounts of data.
+#[cfg_attr(feature = "single_instruction_test", allow(dead_code))] // Currently only used in types overridden by the `single_instruction_test` feature
+pub(crate) fn hash_for_debugging(value: &impl Hash) -> u64 {
+ let mut hasher = DefaultHasher::new();
+ value.hash(&mut hasher);
+ hasher.finish()
+}
+
+/// VM execution mode requirements (kernel only, not in static call) that can be placed on instructions.
+#[derive(Debug, Clone, Copy)]
+pub struct ModeRequirements(pub(crate) u8);
+
+impl ModeRequirements {
+ /// Creates new requirements.
+ pub const fn new(kernel_only: bool, cannot_use_in_static: bool) -> Self {
+ Self((kernel_only as u8) | ((cannot_use_in_static as u8) << 1))
+ }
+
+ /// Creates default requirements that always hold.
+ pub const fn none() -> Self {
+ Self::new(false, false)
+ }
+
+ pub(crate) fn met(self, is_kernel: bool, is_static: bool) -> bool {
+ let enabled_modes = u8::from(is_kernel) | (u8::from(!is_static) << 1);
+ enabled_modes & self.0 == self.0
+ }
+}
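+
+// A minimal sketch (hypothetical test, not part of the crate) of the bit
+// logic in `met`: a kernel-only requirement that also forbids static calls
+// holds exactly when the frame is in the kernel and not static.
+#[cfg(test)]
+mod mode_requirements_sketch {
+ use super::ModeRequirements;
+
+ #[test]
+ fn met_checks_both_bits() {
+ let requirements = ModeRequirements::new(true, true);
+ assert!(requirements.met(true, false)); // kernel, non-static: both bits enabled
+ assert!(!requirements.met(true, true)); // a static context clears bit 1
+ assert!(!requirements.met(false, false)); // a non-kernel frame clears bit 0
+ }
+}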
+
+const LT_BIT: u8 = 1;
+const EQ_BIT: u8 = 1 << 1;
+const GT_BIT: u8 = 1 << 2;
+const ALWAYS_BIT: u8 = 1 << 3;
+
+#[derive(Debug, Clone, PartialEq)]
+pub(crate) struct Flags(u8);
+
+impl Flags {
+ pub(crate) fn new(lt_of: bool, eq: bool, gt: bool) -> Self {
+ Flags(u8::from(lt_of) | (u8::from(eq) << 1) | (u8::from(gt) << 2) | ALWAYS_BIT)
+ }
+}
+
+/// Predicate for an instruction. Encoded so that comparing it to flags is efficient.
+#[derive(Copy, Clone, Debug, Default, Hash)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
+#[repr(u8)]
+pub enum Predicate {
+ /// Always execute the associated instruction.
+ #[default]
+ Always = ALWAYS_BIT,
+ /// Execute the associated instruction if the "greater than" execution flag is set.
+ IfGT = GT_BIT,
+ /// Execute the associated instruction if the "equal" execution flag is set.
+ IfEQ = EQ_BIT,
+ /// Execute the associated instruction if the "less than" execution flag is set.
+ IfLT = LT_BIT,
+ /// Execute the associated instruction if either of "greater than" or "equal" execution flags are set.
+ IfGE = GT_BIT | EQ_BIT,
+ /// Execute the associated instruction if either of "less than" or "equal" execution flags are set.
+ IfLE = LT_BIT | EQ_BIT,
+ /// Execute the associated instruction if the "equal" execution flag is not set.
+ IfNotEQ = EQ_BIT << 4 | ALWAYS_BIT,
+ /// Execute the associated instruction if either of "less than" or "greater than" execution flags are set.
+ IfGTOrLT = GT_BIT | LT_BIT,
+}
+
+impl Predicate {
+ #[inline(always)]
+ pub(crate) fn satisfied(self, flags: &Flags) -> bool {
+ let bits = self as u8;
+ bits & flags.0 != 0 && (bits >> 4) & flags.0 == 0
+ }
+}
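+
+// Worked example of the encoding (values from the constants above): for
+// `IfNotEQ`, the low nibble is `ALWAYS_BIT`, which every `Flags` value has
+// set, and the high nibble is `EQ_BIT`, so `satisfied` holds exactly when the
+// "equal" flag is clear. The high nibble lists flags that must *not* be set.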
+
+#[cfg(feature = "single_instruction_test")]
+impl From<&Flags> for zk_evm::flags::Flags {
+ fn from(flags: &Flags) -> Self {
+ zk_evm::flags::Flags {
+ overflow_or_less_than_flag: flags.0 & LT_BIT != 0,
+ equality_flag: flags.0 & EQ_BIT != 0,
+ greater_than_flag: flags.0 & GT_BIT != 0,
+ }
+ }
+}
+
+use std::{fmt, sync::Arc};
+
+use primitive_types::U256;
+use zksync_vm2_interface::Tracer;
+
+use crate::{
+ addressing_modes::Arguments, decode::decode, hash_for_debugging, instruction::ExecutionStatus,
+ Instruction, ModeRequirements, Predicate, VirtualMachine, World,
+};
+
+/// Compiled EraVM bytecode.
+///
+/// Cloning this is cheap. It is a handle to memory similar to [`Arc`].
+pub struct Program<T, W> {
+ // An internal representation that doesn't need two Arcs would be better
+ // but it would also require a lot of unsafe, so I made this wrapper to
+ // enable changing the internals later.
+ code_page: Arc<[U256]>,
+ instructions: Arc<[Instruction<T, W>]>,
+}
+
+impl<T, W> Clone for Program<T, W> {
+ fn clone(&self) -> Self {
+ Self {
+ code_page: self.code_page.clone(),
+ instructions: self.instructions.clone(),
+ }
+ }
+}
+
+impl<T, W> fmt::Debug for Program<T, W> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ const DEBUGGED_ITEMS: usize = 16;
+
+ let mut s = formatter.debug_struct("Program");
+ if self.code_page.len() <= DEBUGGED_ITEMS {
+ s.field("code_page", &self.code_page);
+ } else {
+ s.field("code_page.len", &self.code_page.len())
+ .field("code_page.start", &&self.code_page[..DEBUGGED_ITEMS])
+ .field("code_page.hash", &hash_for_debugging(&self.code_page));
+ }
+
+ if self.instructions.len() <= DEBUGGED_ITEMS {
+ s.field("instructions", &self.instructions);
+ } else {
+ s.field("instructions.len", &self.instructions.len())
+ .field("instructions.start", &&self.instructions[..DEBUGGED_ITEMS]);
+ }
+ s.finish_non_exhaustive()
+ }
+}
+
+impl<T: Tracer, W: World<T>> Program<T, W> {
+ /// Creates a new program.
+ #[allow(clippy::missing_panics_doc)] // false positive
+ pub fn new(bytecode: &[u8], enable_hooks: bool) -> Self {
+ let instructions = decode_program(
+ &bytecode
+ .chunks_exact(8)
+ .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap()))
+ .collect::<Vec<_>>(),
+ enable_hooks,
+ );
+ let code_page = bytecode
+ .chunks_exact(32)
+ .map(U256::from_big_endian)
+ .collect::<Vec<_>>();
+ Self {
+ instructions: instructions.into(),
+ code_page: code_page.into(),
+ }
+ }
+
+ /// Creates a new program from `U256` words.
+ pub fn from_words(bytecode_words: Vec<U256>, enable_hooks: bool) -> Self {
+ let instructions = decode_program(
+ &bytecode_words
+ .iter()
+ .flat_map(|x| x.0.into_iter().rev())
+ .collect::<Vec<_>>(),
+ enable_hooks,
+ );
+ Self {
+ instructions: instructions.into(),
+ code_page: bytecode_words.into(),
+ }
+ }
+
+ #[doc(hidden)] // should only be used in low-level tests / benchmarks
+ pub fn from_raw(instructions: Vec<Instruction<T, W>>, code_page: Vec<U256>) -> Self {
+ Self {
+ instructions: instructions.into(),
+ code_page: code_page.into(),
+ }
+ }
+}
+
+impl<T, W> Program<T, W> {
+ pub(crate) fn instruction(&self, n: u16) -> Option<&Instruction<T, W>> {
+ self.instructions.get::<usize>(n.into())
+ }
+
+ /// Returns a reference to the code page of this program.
+ pub fn code_page(&self) -> &[U256] {
+ &self.code_page
+ }
+}
+
+// This implementation compares pointers instead of programs.
+//
+// That works well enough for the tests that this is written for.
+// I don't want to implement PartialEq for Instruction because
+ // comparing function pointers can work in surprising ways.
+impl<T, W> PartialEq for Program<T, W> {
+ fn eq(&self, other: &Self) -> bool {
+ Arc::ptr_eq(&self.code_page, &other.code_page)
+ && Arc::ptr_eq(&self.instructions, &other.instructions)
+ }
+}
+
+/// Wraparound instruction placed at the end of programs exceeding `1 << 16` instructions to simulate the 16-bit program counter overflowing.
+/// Does not invoke tracers because it is an implementation detail, not an actual instruction.
+fn jump_to_beginning<T, W>() -> Instruction<T, W> {
+ Instruction {
+ handler: jump_to_beginning_handler,
+ arguments: Arguments::new(Predicate::Always, 0, ModeRequirements::none()),
+ }
+}
+
+fn jump_to_beginning_handler<T, W>(
+ vm: &mut VirtualMachine<T, W>,
+ _: &mut W,
+ _: &mut T,
+) -> ExecutionStatus {
+ let first_instruction = vm.state.current_frame.program.instruction(0).unwrap();
+ vm.state.current_frame.pc = first_instruction;
+ ExecutionStatus::Running
+}
+
+fn decode_program<T: Tracer, W: World<T>>(
+ raw: &[u64],
+ is_bootloader: bool,
+) -> Vec<Instruction<T, W>> {
+ raw.iter()
+ .take(1 << 16)
+ .map(|i| decode(*i, is_bootloader))
+ .chain(std::iter::once(if raw.len() >= 1 << 16 {
+ jump_to_beginning()
+ } else {
+ Instruction::from_invalid()
+ }))
+ .collect()
+}
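+
+// A minimal sketch (test-only) of the handle semantics documented above:
+// clones are cheap and share the same `Arc`s, which is also exactly what the
+// pointer-based `PartialEq` compares. `TestWorld` is this crate's test `World`.
+#[cfg(all(test, not(feature = "single_instruction_test")))]
+mod tests {
+ use super::*;
+ use crate::testonly::TestWorld;
+
+ #[test]
+ fn cloned_programs_share_memory_and_compare_equal() {
+ let program: Program<(), TestWorld<()>> =
+ Program::from_raw(vec![Instruction::from_invalid()], vec![U256::zero()]);
+ let clone = program.clone();
+ assert_eq!(clone, program);
+ assert_eq!(clone.code_page(), program.code_page());
+ }
+}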
+
+use std::collections::{BTreeMap, BTreeSet};
+
+/// A trait for things that can be rolled back to snapshots.
+pub(crate) trait Rollback {
+ type Snapshot;
+ fn snapshot(&self) -> Self::Snapshot;
+ fn rollback(&mut self, snapshot: Self::Snapshot);
+ fn delete_history(&mut self);
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct RollbackableMap<K: Ord, V> {
+ map: BTreeMap<K, V>,
+ old_entries: Vec<(K, Option<V>)>,
+}
+
+impl<K: Ord + Clone, V: Clone> RollbackableMap<K, V> {
+ pub(crate) fn insert(&mut self, key: K, value: V) -> Option<V> {
+ let old_value = self.map.insert(key.clone(), value);
+ self.old_entries.push((key, old_value.clone()));
+ old_value
+ }
+
+ pub(crate) fn changes_after(
+ &self,
+ snapshot: <Self as Rollback>::Snapshot,
+ ) -> BTreeMap<K, (Option<V>, V)> {
+ let mut changes = BTreeMap::new();
+ for (key, old_value) in self.old_entries[snapshot..].iter().rev() {
+ changes
+ .entry(key.clone())
+ .and_modify(|(old, _): &mut (Option<V>, V)| old.clone_from(old_value))
+ .or_insert((old_value.clone(), self.map.get(key).unwrap().clone()));
+ }
+ changes
+ }
+}
+
+impl<K: Ord, V> Rollback for RollbackableMap<K, V> {
+ type Snapshot = usize;
+
+ fn snapshot(&self) -> Self::Snapshot {
+ self.old_entries.len()
+ }
+
+ fn rollback(&mut self, snapshot: Self::Snapshot) {
+ for (k, v) in self.old_entries.drain(snapshot..).rev() {
+ if let Some(old_value) = v {
+ self.map.insert(k, old_value);
+ } else {
+ self.map.remove(&k);
+ }
+ }
+ }
+
+ fn delete_history(&mut self) {
+ self.old_entries.clear();
+ }
+}
+
+impl<K: Ord, V> AsRef<BTreeMap<K, V>> for RollbackableMap<K, V> {
+ fn as_ref(&self) -> &BTreeMap<K, V> {
+ &self.map
+ }
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct RollbackableSet<K: Ord> {
+ map: BTreeSet<K>,
+ old_entries: Vec<K>,
+}
+
+impl<T: Ord + Clone> RollbackableSet<T> {
+ /// Adds `key` to the set and returns `true` if it was added (i.e., it was not present earlier).
+ pub(crate) fn add(&mut self, key: T) -> bool {
+ let is_new = self.map.insert(key.clone());
+ if is_new {
+ self.old_entries.push(key);
+ }
+ is_new
+ }
+}
+
+impl<K: Ord> Rollback for RollbackableSet<K> {
+ type Snapshot = usize;
+
+ fn snapshot(&self) -> Self::Snapshot {
+ self.old_entries.len()
+ }
+
+ fn rollback(&mut self, snapshot: Self::Snapshot) {
+ for k in self.old_entries.drain(snapshot..) {
+ self.map.remove(&k);
+ }
+ }
+
+ fn delete_history(&mut self) {
+ self.old_entries.clear();
+ }
+}
+
+impl<K: Ord> AsRef<BTreeSet<K>> for RollbackableSet<K> {
+ fn as_ref(&self) -> &BTreeSet<K> {
+ &self.map
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct RollbackableLog<T> {
+ entries: Vec<T>,
+}
+
+impl<T> Default for RollbackableLog<T> {
+ fn default() -> Self {
+ Self {
+ entries: Vec::default(),
+ }
+ }
+}
+
+impl<T> Rollback for RollbackableLog<T> {
+ type Snapshot = usize;
+
+ fn snapshot(&self) -> Self::Snapshot {
+ self.entries.len()
+ }
+
+ fn rollback(&mut self, snapshot: Self::Snapshot) {
+ self.entries.truncate(snapshot);
+ }
+
+ fn delete_history(&mut self) {}
+}
+
+impl<T> RollbackableLog<T> {
+ pub(crate) fn push(&mut self, entry: T) {
+ self.entries.push(entry);
+ }
+
+ pub(crate) fn logs_after(&self, snapshot: <RollbackableLog<T> as Rollback>::Snapshot) -> &[T] {
+ &self.entries[snapshot..]
+ }
+}
+
+impl<T> AsRef<[T]> for RollbackableLog<T> {
+ fn as_ref(&self) -> &[T] {
+ &self.entries
+ }
+}
+
+/// Rollbackable Plain Old Data simply stores copies of itself in snapshots.
+#[derive(Debug, Default, Copy, Clone)]
+pub(crate) struct RollbackablePod<T: Copy>(pub(crate) T);
+
+impl<T: Copy> Rollback for RollbackablePod<T> {
+ type Snapshot = T;
+
+ fn snapshot(&self) -> Self::Snapshot {
+ self.0
+ }
+
+ fn rollback(&mut self, snapshot: Self::Snapshot) {
+ self.0 = snapshot;
+ }
+
+ fn delete_history(&mut self) {}
+}
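+
+// An illustrative test of the snapshot contract: the `usize` snapshot is just
+// the undo-log length, `changes_after` reports everything recorded past it,
+// and `rollback` drains those entries, restoring overwritten or removed values.
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn map_rollback_restores_previous_state() {
+ let mut map = RollbackableMap::<u32, &str>::default();
+ map.insert(1, "a");
+ let snapshot = map.snapshot();
+ map.insert(1, "b");
+ map.insert(2, "c");
+
+ let changes = map.changes_after(snapshot);
+ assert_eq!(changes[&1], (Some("a"), "b"));
+ assert_eq!(changes[&2], (None, "c"));
+
+ map.rollback(snapshot);
+ assert_eq!(map.as_ref().get(&1), Some(&"a"));
+ assert_eq!(map.as_ref().get(&2), None);
+ }
+}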
+
+use std::{
+ alloc::{alloc, alloc_zeroed, Layout},
+ fmt,
+};
+
+use primitive_types::U256;
+
+use crate::{bitset::Bitset, fat_pointer::FatPointer, hash_for_debugging};
+
+#[derive(PartialEq)]
+pub(crate) struct Stack {
+ /// Set of slots that may be interpreted as [`FatPointer`].
+ pointer_flags: Bitset,
+ dirty_areas: u64,
+ slots: [U256; 1 << 16],
+}
+
+const NUMBER_OF_DIRTY_AREAS: usize = 64;
+const DIRTY_AREA_SIZE: usize = (1 << 16) / NUMBER_OF_DIRTY_AREAS;
+
+impl Stack {
+ #[allow(clippy::cast_ptr_alignment)] // aligned per `Stack` layout
+ pub(crate) fn new() -> Box<Self> {
+ unsafe { Box::from_raw(alloc_zeroed(Layout::new::<Self>()).cast()) }
+ }
+
+ #[inline(always)]
+ pub(crate) fn get(&self, slot: u16) -> U256 {
+ self.slots[slot as usize]
+ }
+
+ #[inline(always)]
+ pub(crate) fn set(&mut self, slot: u16, value: U256) {
+ let written_area = slot as usize / DIRTY_AREA_SIZE;
+ self.dirty_areas |= 1 << written_area;
+
+ self.slots[slot as usize] = value;
+ }
+
+ fn zero(&mut self) {
+ for i in 0..NUMBER_OF_DIRTY_AREAS {
+ if self.dirty_areas & (1 << i) != 0 {
+ for slot in &mut self.slots[i * DIRTY_AREA_SIZE..(i + 1) * DIRTY_AREA_SIZE] {
+ *slot = U256::zero();
+ }
+ }
+ }
+
+ self.dirty_areas = 0;
+ self.pointer_flags = Bitset::default();
+ }
+
+ #[inline(always)]
+ pub(crate) fn get_pointer_flag(&self, slot: u16) -> bool {
+ self.pointer_flags.get(slot)
+ }
+
+ #[inline(always)]
+ pub(crate) fn set_pointer_flag(&mut self, slot: u16) {
+ self.pointer_flags.set(slot);
+ }
+
+ #[inline(always)]
+ pub(crate) fn clear_pointer_flag(&mut self, slot: u16) {
+ self.pointer_flags.clear(slot);
+ }
+
+ pub(crate) fn snapshot(&self) -> StackSnapshot {
+ let dirty_prefix_end = NUMBER_OF_DIRTY_AREAS - self.dirty_areas.leading_zeros() as usize;
+
+ StackSnapshot {
+ pointer_flags: self.pointer_flags.clone(),
+ dirty_areas: self.dirty_areas,
+ slots: self.slots[..DIRTY_AREA_SIZE * dirty_prefix_end].into(),
+ }
+ }
+
+ pub(crate) fn rollback(&mut self, snapshot: StackSnapshot) {
+ let StackSnapshot {
+ pointer_flags,
+ dirty_areas,
+ slots,
+ } = snapshot;
+
+ self.zero();
+
+ self.pointer_flags = pointer_flags;
+ self.dirty_areas = dirty_areas;
+ self.slots[..slots.len()].copy_from_slice(&slots);
+ }
+}
+
+pub(crate) struct StackSnapshot {
+ pointer_flags: Bitset,
+ dirty_areas: u64,
+ slots: Box<[U256]>,
+}
+
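+// Manual deep copy: a `Stack` is a couple of megabytes of plain old data, so a
+// raw byte copy into a fresh allocation is sound and, unlike the code that
+// `derive(Clone)` would emit, never materializes the array on the stack
+// (see `clone_does_not_segfault` below).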
+impl Clone for Box<Stack> {
+ fn clone(&self) -> Self {
+ unsafe {
+ let allocation = alloc(Layout::for_value(&**self)).cast();
+ std::ptr::copy_nonoverlapping(&**self, allocation, 1);
+ Box::from_raw(allocation)
+ }
+ }
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct StackPool {
+ stacks: Vec<Box<Stack>>,
+}
+
+impl StackPool {
+ pub(crate) fn get(&mut self) -> Box<Stack> {
+ self.stacks.pop().map_or_else(Stack::new, |mut s| {
+ s.zero();
+ s
+ })
+ }
+
+ pub(crate) fn recycle(&mut self, stack: Box<Stack>) {
+ self.stacks.push(stack);
+ }
+}
+
+// region:Debug implementations
+
+/// Helper wrapper for debugging [`Stack`] / [`StackSnapshot`] contents.
+struct StackStart<I>(I);
+
+impl<I: Iterator<Item = (bool, U256)> + Clone> fmt::Debug for StackStart<I> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut list = formatter.debug_list();
+ for (is_pointer, slot) in self.0.clone() {
+ if is_pointer {
+ list.entry(&FatPointer::from(slot));
+ } else {
+ list.entry(&slot);
+ }
+ }
+ list.finish()
+ }
+}
+
+impl fmt::Debug for Stack {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ const DEBUGGED_SLOTS: usize = 256;
+
+ let slots = (0_u16..)
+ .zip(&self.slots)
+ .map(|(idx, slot)| (self.pointer_flags.get(idx), *slot))
+ .take(DEBUGGED_SLOTS);
+ formatter
+ .debug_struct("Stack")
+ .field("start", &StackStart(slots))
+ .field(
+ "pointer_flags.hash",
+ &hash_for_debugging(&self.pointer_flags),
+ )
+ .field("slots.hash", &hash_for_debugging(&self.slots))
+ .finish_non_exhaustive()
+ }
+}
+
+impl fmt::Debug for StackSnapshot {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ const DEBUGGED_SLOTS: usize = 256;
+
+ let slots = (0_u16..)
+ .zip(&self.slots[..])
+ .map(|(idx, slot)| (self.pointer_flags.get(idx), *slot))
+ .take(DEBUGGED_SLOTS);
+ formatter
+ .debug_struct("StackSnapshot")
+ .field("dirty_areas", &self.dirty_areas)
+ .field("start", &StackStart(slots))
+ .field(
+ "pointer_flags.hash",
+ &hash_for_debugging(&self.pointer_flags),
+ )
+ .field("slots.hash", &hash_for_debugging(&self.slots))
+ .finish_non_exhaustive()
+ }
+}
+// endregion
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // The code produced by derive(Clone) overflows the stack in debug mode.
+ #[test]
+ fn clone_does_not_segfault() {
+ let stack = Stack::new();
+ let _ = stack.clone();
+ }
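+
+ // Sketch of the snapshot/rollback contract: a snapshot stores only the
+ // dirty prefix, and rolling back re-zeroes everything written since.
+ #[test]
+ fn rollback_zeroes_slots_written_after_snapshot() {
+ let mut stack = Stack::new();
+ let snapshot = stack.snapshot();
+ stack.set(42, U256::one());
+ assert_eq!(stack.get(42), U256::one());
+
+ stack.rollback(snapshot);
+ assert_eq!(stack.get(42), U256::zero());
+ }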
+}
+
+use primitive_types::{H160, U256};
+use zksync_vm2_interface::{HeapId, Tracer};
+
+use crate::{
+ addressing_modes::Addressable,
+ callframe::{Callframe, CallframeSnapshot},
+ fat_pointer::FatPointer,
+ heap::Heaps,
+ predication::Flags,
+ program::Program,
+ stack::Stack,
+ world_diff::Snapshot,
+};
+
+/// State of a [`VirtualMachine`](crate::VirtualMachine).
+#[derive(Debug)]
+pub(crate) struct State<T, W> {
+ pub(crate) registers: [U256; 16],
+ pub(crate) register_pointer_flags: u16,
+ pub(crate) flags: Flags,
+ pub(crate) current_frame: Callframe<T, W>,
+ /// Contains indices to the far call instructions currently being executed.
+ /// They are needed to continue execution from the correct spot upon return.
+ pub(crate) previous_frames: Vec<Callframe<T, W>>,
+ pub(crate) heaps: Heaps,
+ pub(crate) transaction_number: u16,
+ pub(crate) context_u128: u128,
+}
+
+impl<T, W> State<T, W> {
+ pub(crate) fn new(
+ address: H160,
+ caller: H160,
+ calldata: &[u8],
+ gas: u32,
+ program: Program<T, W>,
+ world_before_this_frame: Snapshot,
+ stack: Box<Stack>,
+ ) -> Self {
+ let mut registers: [U256; 16] = Default::default();
+ registers[1] = FatPointer {
+ memory_page: HeapId::FIRST_CALLDATA,
+ offset: 0,
+ start: 0,
+ length: u32::try_from(calldata.len()).expect("calldata length overflow"),
+ }
+ .into_u256();
+
+ Self {
+ registers,
+ register_pointer_flags: 1 << 1, // calldata is a pointer
+ flags: Flags::new(false, false, false),
+ current_frame: Callframe::new(
+ address,
+ address,
+ caller,
+ program,
+ stack,
+ HeapId::FIRST,
+ HeapId::FIRST_AUX,
+ HeapId::FIRST_CALLDATA,
+ gas,
+ 0,
+ 0,
+ 0,
+ false,
+ world_before_this_frame,
+ ),
+ previous_frames: vec![],
+
+ heaps: Heaps::new(calldata),
+
+ transaction_number: 0,
+ context_u128: 0,
+ }
+ }
+
+ #[inline(always)]
+ pub(crate) fn use_gas(&mut self, amount: u32) -> Result<(), ()> {
+ if self.current_frame.gas >= amount {
+ self.current_frame.gas -= amount;
+ Ok(())
+ } else {
+ self.current_frame.gas = 0;
+ Err(())
+ }
+ }
+
+ pub(crate) fn set_context_u128(&mut self, value: u128) {
+ self.context_u128 = value;
+ }
+
+ pub(crate) fn get_context_u128(&self) -> u128 {
+ self.current_frame.context_u128
+ }
+}
+
+impl<T: Tracer, W> State<T, W> {
+ /// Returns the total unspent gas in the VM, including stipends.
+ pub(crate) fn total_unspent_gas(&self) -> u32 {
+ self.current_frame.gas
+ + self
+ .previous_frames
+ .iter()
+ .map(Callframe::contained_gas)
+ .sum::<u32>()
+ }
+
+ pub(crate) fn snapshot(&self) -> StateSnapshot {
+ StateSnapshot {
+ registers: self.registers,
+ register_pointer_flags: self.register_pointer_flags,
+ flags: self.flags.clone(),
+ bootloader_frame: self.current_frame.snapshot(),
+ bootloader_heap_snapshot: self.heaps.snapshot(),
+ transaction_number: self.transaction_number,
+ context_u128: self.context_u128,
+ }
+ }
+
+ pub(crate) fn rollback(&mut self, snapshot: StateSnapshot) {
+ let StateSnapshot {
+ registers,
+ register_pointer_flags,
+ flags,
+ bootloader_frame,
+ bootloader_heap_snapshot,
+ transaction_number,
+ context_u128,
+ } = snapshot;
+
+ for heap in self.current_frame.rollback(bootloader_frame) {
+ self.heaps.deallocate(heap);
+ }
+ self.heaps.rollback(bootloader_heap_snapshot);
+ self.registers = registers;
+ self.register_pointer_flags = register_pointer_flags;
+ self.flags = flags;
+ self.transaction_number = transaction_number;
+ self.context_u128 = context_u128;
+ }
+
+ pub(crate) fn delete_history(&mut self) {
+ self.heaps.delete_history();
+ }
+}
+
+impl<T, W> Clone for State<T, W> {
+ fn clone(&self) -> Self {
+ Self {
+ registers: self.registers,
+ register_pointer_flags: self.register_pointer_flags,
+ flags: self.flags.clone(),
+ current_frame: self.current_frame.clone(),
+ previous_frames: self.previous_frames.clone(),
+ heaps: self.heaps.clone(),
+ transaction_number: self.transaction_number,
+ context_u128: self.context_u128,
+ }
+ }
+}
+
+impl<T, W> PartialEq for State<T, W> {
+ fn eq(&self, other: &Self) -> bool {
+ // does not compare cycle counts to work with tests that
+ // expect no change after a rollback
+ self.registers == other.registers
+ && self.register_pointer_flags == other.register_pointer_flags
+ && self.flags == other.flags
+ && self.transaction_number == other.transaction_number
+ && self.context_u128 == other.context_u128
+ && self.current_frame == other.current_frame
+ && self.previous_frames == other.previous_frames
+ && self.heaps == other.heaps
+ }
+}
+
+impl<T, W> Addressable for State<T, W> {
+ fn registers(&mut self) -> &mut [U256; 16] {
+ &mut self.registers
+ }
+
+ fn register_pointer_flags(&mut self) -> &mut u16 {
+ &mut self.register_pointer_flags
+ }
+
+ fn read_stack(&mut self, slot: u16) -> U256 {
+ self.current_frame.stack.get(slot)
+ }
+
+ fn write_stack(&mut self, slot: u16, value: U256) {
+ self.current_frame.stack.set(slot, value);
+ }
+
+ fn stack_pointer(&mut self) -> &mut u16 {
+ &mut self.current_frame.sp
+ }
+
+ fn read_stack_pointer_flag(&mut self, slot: u16) -> bool {
+ self.current_frame.stack.get_pointer_flag(slot)
+ }
+
+ fn set_stack_pointer_flag(&mut self, slot: u16) {
+ self.current_frame.stack.set_pointer_flag(slot);
+ }
+
+ fn clear_stack_pointer_flag(&mut self, slot: u16) {
+ self.current_frame.stack.clear_pointer_flag(slot);
+ }
+
+ fn code_page(&self) -> &[U256] {
+ self.current_frame.program.code_page()
+ }
+
+ fn in_kernel_mode(&self) -> bool {
+ self.current_frame.is_kernel
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct StateSnapshot {
+ registers: [U256; 16],
+ register_pointer_flags: u16,
+ flags: Flags,
+ bootloader_frame: CallframeSnapshot,
+ bootloader_heap_snapshot: (usize, usize),
+ transaction_number: u16,
+ context_u128: u128,
+}
+
+//! Test-only tools for EraVM.
+
+use std::{
+ collections::{hash_map::DefaultHasher, BTreeMap},
+ hash::{Hash, Hasher},
+};
+
+use primitive_types::{H160, U256};
+use zkevm_opcode_defs::{
+ ethereum_types::Address, system_params::DEPLOYER_SYSTEM_CONTRACT_ADDRESS_LOW,
+};
+use zksync_vm2_interface::Tracer;
+
+use crate::{instruction_handlers::address_into_u256, Program, StorageInterface, World};
+
+/// Test [`World`] implementation.
+#[derive(Debug)]
+pub struct TestWorld<T> {
+ pub(crate) address_to_hash: BTreeMap<U256, U256>,
+ pub(crate) hash_to_contract: BTreeMap<U256, Program<T, Self>>,
+}
+
+impl<T: Tracer> TestWorld<T> {
+ /// Creates a test world with the provided programs.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the provided `Program`s are malformed.
+ pub fn new(contracts: &[(Address, Program<T, Self>)]) -> Self {
+ let mut address_to_hash = BTreeMap::new();
+ let mut hash_to_contract = BTreeMap::new();
+ for (i, (address, code)) in contracts.iter().enumerate() {
+ // We add the index to the hash because tests may leave the code page blank.
+ let mut hasher = DefaultHasher::new();
+ i.hash(&mut hasher);
+ code.code_page().hash(&mut hasher);
+
+ let mut code_info_bytes = [0; 32];
+ code_info_bytes[24..].copy_from_slice(&hasher.finish().to_be_bytes());
+ let code_len = u16::try_from(code.code_page().len())
+ .expect("code length must not exceed u16::MAX");
+ code_info_bytes[2..=3].copy_from_slice(&code_len.to_be_bytes());
+ code_info_bytes[0] = 1;
+ let hash = U256::from_big_endian(&code_info_bytes);
+
+ address_to_hash.insert(address_into_u256(*address), hash);
+ hash_to_contract.insert(hash, code.clone());
+ }
+ Self {
+ address_to_hash,
+ hash_to_contract,
+ }
+ }
+}
+
+impl<T: Tracer> World<T> for TestWorld<T> {
+ fn decommit(&mut self, hash: U256) -> Program<T, Self> {
+ if let Some(program) = self.hash_to_contract.get(&hash) {
+ program.clone()
+ } else {
+ panic!("unexpected decommit")
+ }
+ }
+
+ fn decommit_code(&mut self, hash: U256) -> Vec<u8> {
+ self.decommit(hash)
+ .code_page()
+ .iter()
+ .flat_map(|u256| {
+ let mut buffer = [0u8; 32];
+ u256.to_big_endian(&mut buffer);
+ buffer
+ })
+ .collect()
+ }
+}
+
+impl<T> StorageInterface for TestWorld<T> {
+ fn read_storage(&mut self, contract: H160, key: U256) -> Option<U256> {
+ let deployer_system_contract_address =
+ Address::from_low_u64_be(DEPLOYER_SYSTEM_CONTRACT_ADDRESS_LOW.into());
+
+ if contract == deployer_system_contract_address {
+ Some(
+ self.address_to_hash
+ .get(&key)
+ .copied()
+ .unwrap_or(U256::zero()),
+ )
+ } else {
+ None
+ }
+ }
+
+ fn cost_of_writing_storage(&mut self, _initial_value: Option<U256>, _new_value: U256) -> u32 {
+ 50
+ }
+
+ fn is_free_storage_slot(&self, _contract: &H160, _key: &U256) -> bool {
+ false
+ }
+}
+
+/// May be used to load code when the VM first starts up.
+/// Doesn't check for any errors.
+/// Doesn't cost anything but also doesn't make the code free in future decommits.
+#[doc(hidden)] // should be used only in low-level testing / benches
+pub fn initial_decommit<T: Tracer, W: World<T>>(world: &mut W, address: H160) -> Program<T, W> {
+ let deployer_system_contract_address =
+ Address::from_low_u64_be(DEPLOYER_SYSTEM_CONTRACT_ADDRESS_LOW.into());
+ let code_info = world
+ .read_storage(deployer_system_contract_address, address_into_u256(address))
+ .unwrap_or_default();
+
+ let mut code_info_bytes = [0; 32];
+ code_info.to_big_endian(&mut code_info_bytes);
+
+ code_info_bytes[1] = 0;
+ let code_key: U256 = U256::from_big_endian(&code_info_bytes);
+
+ world.decommit(code_key)
+}
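+
+// A sketch of the round trip the helpers above are meant for: `TestWorld::new`
+// derives a versioned code hash from the code page, and `initial_decommit`
+// resolves it back via the deployer system contract's storage.
+#[cfg(all(test, not(feature = "single_instruction_test")))]
+mod tests {
+ use super::*;
+ use crate::Instruction;
+
+ #[test]
+ fn initial_decommit_round_trip() {
+ let program: Program<(), TestWorld<()>> =
+ Program::from_raw(vec![Instruction::from_invalid()], vec![U256::zero(); 3]);
+ let address = Address::from_low_u64_be(0x1234);
+ let mut world = TestWorld::new(&[(address, program)]);
+
+ let decommitted = initial_decommit(&mut world, address);
+ assert_eq!(decommitted.code_page().len(), 3);
+ }
+}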
+
+use std::cmp::Ordering;
+
+use primitive_types::{H160, U256};
+use zksync_vm2_interface::{
+ CallframeInterface, Event, Flags, HeapId, L2ToL1Log, StateInterface, Tracer,
+};
+
+use crate::{
+ callframe::{Callframe, NearCallFrame},
+ decommit::is_kernel,
+ predication::{self, Predicate},
+ VirtualMachine,
+};
+
+impl<T: Tracer, W> StateInterface for VirtualMachine<T, W> {
+ fn read_register(&self, register: u8) -> (U256, bool) {
+ (
+ self.state.registers[register as usize],
+ self.state.register_pointer_flags & (1 << register) != 0,
+ )
+ }
+
+ fn set_register(&mut self, register: u8, value: U256, is_pointer: bool) {
+ self.state.registers[register as usize] = value;
+
+ self.state.register_pointer_flags &= !(1 << register);
+ self.state.register_pointer_flags |= u16::from(is_pointer) << register;
+ }
+
+ fn number_of_callframes(&self) -> usize {
+ self.state
+ .previous_frames
+ .iter()
+ .map(|frame| frame.near_calls.len() + 1)
+ .sum::<usize>()
+ + self.state.current_frame.near_calls.len()
+ + 1
+ }
+
+ fn current_frame(&mut self) -> impl CallframeInterface + '_ {
+ let near_call = self.state.current_frame.near_calls.len().checked_sub(1);
+ CallframeWrapper {
+ frame: &mut self.state.current_frame,
+ near_call,
+ }
+ }
+
+ fn callframe(&mut self, mut n: usize) -> impl CallframeInterface + '_ {
+ for far_frame in std::iter::once(&mut self.state.current_frame)
+ .chain(self.state.previous_frames.iter_mut().rev())
+ {
+ let near_calls = far_frame.near_calls.len();
+ match n.cmp(&near_calls) {
+ Ordering::Less => {
+ return CallframeWrapper {
+ frame: far_frame,
+ near_call: Some(near_calls - 1 - n),
+ }
+ }
+ Ordering::Equal => {
+ return CallframeWrapper {
+ frame: far_frame,
+ near_call: None,
+ }
+ }
+ Ordering::Greater => n -= near_calls + 1,
+ }
+ }
+ panic!("Callframe index out of bounds")
+ }
+
+ fn read_heap_byte(&self, heap: HeapId, index: u32) -> u8 {
+ self.state.heaps[heap].read_byte(index)
+ }
+
+ fn read_heap_u256(&self, heap: HeapId, index: u32) -> U256 {
+ self.state.heaps[heap].read_u256(index)
+ }
+
+ fn write_heap_u256(&mut self, heap: HeapId, index: u32, value: U256) {
+ self.state.heaps.write_u256(heap, index, value);
+ }
+
+ fn flags(&self) -> Flags {
+ let flags = &self.state.flags;
+ Flags {
+ less_than: Predicate::IfLT.satisfied(flags),
+ greater: Predicate::IfGT.satisfied(flags),
+ equal: Predicate::IfEQ.satisfied(flags),
+ }
+ }
+
+ fn set_flags(&mut self, flags: Flags) {
+ self.state.flags = predication::Flags::new(flags.less_than, flags.equal, flags.greater);
+ }
+
+ fn transaction_number(&self) -> u16 {
+ self.state.transaction_number
+ }
+
+ fn set_transaction_number(&mut self, value: u16) {
+ self.state.transaction_number = value;
+ }
+
+ fn context_u128_register(&self) -> u128 {
+ self.state.context_u128
+ }
+
+ fn set_context_u128_register(&mut self, value: u128) {
+ self.state.context_u128 = value;
+ }
+
+ fn get_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)> {
+ self.world_diff
+ .get_storage_state()
+ .iter()
+ .map(|(key, value)| (*key, *value))
+ }
+
+ fn get_transient_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)> {
+ self.world_diff
+ .get_transient_storage_state()
+ .iter()
+ .map(|(key, value)| (*key, *value))
+ }
+
+ fn get_transient_storage(&self, address: H160, slot: U256) -> U256 {
+ self.world_diff
+ .get_transient_storage_state()
+ .get(&(address, slot))
+ .copied()
+ .unwrap_or_default()
+ }
+
+ fn write_transient_storage(&mut self, address: H160, slot: U256, value: U256) {
+ self.world_diff
+ .write_transient_storage(address, slot, value);
+ }
+
+ fn events(&self) -> impl Iterator<Item = Event> {
+ self.world_diff.events().iter().copied()
+ }
+
+ fn l2_to_l1_logs(&self) -> impl Iterator<Item = L2ToL1Log> {
+ self.world_diff.l2_to_l1_logs().iter().copied()
+ }
+
+ fn pubdata(&self) -> i32 {
+ self.world_diff.pubdata()
+ }
+
+ fn set_pubdata(&mut self, value: i32) {
+ self.world_diff.pubdata.0 = value;
+ }
+}
+
+struct CallframeWrapper<'a, T, W> {
+ frame: &'a mut Callframe<T, W>,
+ near_call: Option<usize>,
+}
+
+impl<T: Tracer, W> CallframeInterface for CallframeWrapper<'_, T, W> {
+ fn address(&self) -> H160 {
+ self.frame.address
+ }
+
+ fn set_address(&mut self, address: H160) {
+ self.frame.address = address;
+ self.frame.is_kernel = is_kernel(address);
+ }
+
+ fn code_address(&self) -> H160 {
+ self.frame.code_address
+ }
+
+ fn set_code_address(&mut self, address: H160) {
+ self.frame.code_address = address;
+ }
+
+ fn caller(&self) -> H160 {
+ self.frame.caller
+ }
+
+ fn set_caller(&mut self, address: H160) {
+ self.frame.caller = address;
+ }
+
+ fn is_static(&self) -> bool {
+ self.frame.is_static
+ }
+
+ fn is_kernel(&self) -> bool {
+ self.frame.is_kernel
+ }
+
+ fn stipend(&self) -> u32 {
+ self.frame.stipend
+ }
+
+ fn context_u128(&self) -> u128 {
+ self.frame.context_u128
+ }
+
+ fn set_context_u128(&mut self, value: u128) {
+ self.frame.context_u128 = value;
+ }
+
+ fn read_stack(&self, index: u16) -> (U256, bool) {
+ (
+ self.frame.stack.get(index),
+ self.frame.stack.get_pointer_flag(index),
+ )
+ }
+
+ fn write_stack(&mut self, index: u16, value: U256, is_pointer: bool) {
+ self.frame.stack.set(index, value);
+ if is_pointer {
+ self.frame.stack.set_pointer_flag(index);
+ } else {
+ self.frame.stack.clear_pointer_flag(index);
+ }
+ }
+
+ fn heap(&self) -> HeapId {
+ self.frame.heap
+ }
+
+ fn heap_bound(&self) -> u32 {
+ self.frame.heap_size
+ }
+
+ fn set_heap_bound(&mut self, value: u32) {
+ self.frame.heap_size = value;
+ }
+
+ fn aux_heap(&self) -> HeapId {
+ self.frame.aux_heap
+ }
+
+ fn aux_heap_bound(&self) -> u32 {
+ self.frame.aux_heap_size
+ }
+
+ fn set_aux_heap_bound(&mut self, value: u32) {
+ self.frame.aux_heap_size = value;
+ }
+
+ fn read_contract_code(&self, slot: u16) -> U256 {
+ self.frame.program.code_page()[slot as usize]
+ }
+
+ // The following methods are affected by near calls
+
+ fn is_near_call(&self) -> bool {
+ self.near_call.is_some()
+ }
+
+ fn gas(&self) -> u32 {
+ if let Some(call) = self.near_call_on_top() {
+ call.previous_frame_gas
+ } else {
+ self.frame.gas
+ }
+ }
+
+ fn set_gas(&mut self, new_gas: u32) {
+ if let Some(call) = self.near_call_on_top_mut() {
+ call.previous_frame_gas = new_gas;
+ } else {
+ self.frame.gas = new_gas;
+ }
+ }
+
+ fn stack_pointer(&self) -> u16 {
+ if let Some(call) = self.near_call_on_top() {
+ call.previous_frame_sp
+ } else {
+ self.frame.sp
+ }
+ }
+
+ fn set_stack_pointer(&mut self, value: u16) {
+ if let Some(call) = self.near_call_on_top_mut() {
+ call.previous_frame_sp = value;
+ } else {
+ self.frame.sp = value;
+ }
+ }
+
+ // we don't expect the VM to run on 16-bit machines, and sign loss / wrap is checked
+ #[allow(
+ clippy::cast_sign_loss,
+ clippy::cast_possible_truncation,
+ clippy::cast_possible_wrap
+ )]
+ fn program_counter(&self) -> Option<u16> {
+ if let Some(call) = self.near_call_on_top() {
+ Some(call.previous_frame_pc)
+ } else {
+ let offset = self.frame.get_raw_pc();
+ if offset < 0
+ || offset > u16::MAX as isize
+ || self.frame.program.instruction(offset as u16).is_none()
+ {
+ None
+ } else {
+ Some(offset as u16)
+ }
+ }
+ }
+
+ fn set_program_counter(&mut self, value: u16) {
+ if let Some(call) = self.near_call_on_top_mut() {
+ call.previous_frame_pc = value;
+ } else {
+ self.frame.set_pc_from_u16(value);
+ }
+ }
+
+ fn exception_handler(&self) -> u16 {
+ if let Some(i) = self.near_call {
+ self.frame.near_calls[i].exception_handler
+ } else {
+ self.frame.exception_handler
+ }
+ }
+
+ fn set_exception_handler(&mut self, value: u16) {
+ if let Some(i) = self.near_call {
+ self.frame.near_calls[i].exception_handler = value;
+ } else {
+ self.frame.exception_handler = value;
+ }
+ }
+}
+
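+// Bookkeeping behind the near-call-aware getters above: each `NearCallFrame`
+// stores the gas/SP/PC of the frame that was executing when it was pushed, so
+// the values for the frame this wrapper views live in the next-younger near
+// call (index 0 for the far frame itself, `i + 1` for near call `i`). If
+// there is no younger near call, the frame is the active one and the live
+// `Callframe` fields apply.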
+impl<T, W> CallframeWrapper<'_, T, W> {
+ fn near_call_on_top(&self) -> Option<&NearCallFrame> {
+ let index = self.near_call.map_or(0, |i| i + 1);
+ self.frame.near_calls.get(index)
+ }
+
+ fn near_call_on_top_mut(&mut self) -> Option<&mut NearCallFrame> {
+ let index = self.near_call.map_or(0, |i| i + 1);
+ self.frame.near_calls.get_mut(index)
+ }
+}
+
+#[cfg(all(test, not(feature = "single_instruction_test")))]
+mod test {
+ use primitive_types::H160;
+ use zkevm_opcode_defs::ethereum_types::Address;
+ use zksync_vm2_interface::opcodes;
+
+ use super::*;
+ use crate::{
+ testonly::{initial_decommit, TestWorld},
+ Instruction, Program, VirtualMachine,
+ };
+
+ #[test]
+ fn callframe_picking() {
+ let program = Program::from_raw(vec![Instruction::from_invalid()], vec![]);
+
+ let address = Address::from_low_u64_be(0x_1234_5678_90ab_cdef);
+ let mut world = TestWorld::new(&[(address, program)]);
+ let program = initial_decommit(&mut world, address);
+
+ let mut vm = VirtualMachine::new(
+ address,
+ program.clone(),
+ Address::zero(),
+ &[],
+ 1000,
+ crate::Settings {
+ default_aa_code_hash: [0; 32],
+ evm_interpreter_code_hash: [0; 32],
+ hook_address: 0,
+ },
+ );
+
+ vm.state.current_frame.gas = 0;
+ vm.state.current_frame.exception_handler = 0;
+ let mut frame_count = 1;
+
+ let add_far_frame = |vm: &mut VirtualMachine<(), TestWorld<()>>, counter: &mut u16| {
+ vm.push_frame::<opcodes::Normal>(
+ H160::from_low_u64_be(1),
+ program.clone(),
+ (*counter).into(),
+ 0,
+ *counter,
+ false,
+ HeapId::from_u32_unchecked(5),
+ vm.world_diff.snapshot(),
+ );
+ assert_eq!(vm.current_frame().gas(), (*counter).into());
+ *counter += 1;
+ };
+
+ let add_near_frame = |vm: &mut VirtualMachine<(), TestWorld<()>>, counter: &mut u16| {
+ let count_u32 = (*counter).into();
+ vm.state.current_frame.gas += count_u32;
+ vm.state
+ .current_frame
+ .push_near_call(count_u32, *counter, vm.world_diff.snapshot());
+ assert_eq!(vm.current_frame().gas(), (*counter).into());
+ *counter += 1;
+ };
+
+ add_far_frame(&mut vm, &mut frame_count);
+ add_near_frame(&mut vm, &mut frame_count);
+ add_far_frame(&mut vm, &mut frame_count);
+ add_far_frame(&mut vm, &mut frame_count);
+ add_near_frame(&mut vm, &mut frame_count);
+ add_near_frame(&mut vm, &mut frame_count);
+
+ for (fwd, rev) in (0..frame_count.into()).zip((0..frame_count).rev()) {
+ assert_eq!(vm.callframe(fwd).exception_handler(), rev);
+ assert_eq!(vm.callframe(fwd).gas(), rev.into());
+ }
+ }
+}
+
+use std::fmt;
+
+use primitive_types::H160;
+use zksync_vm2_interface::{opcodes::TypeLevelCallingMode, CallingMode, HeapId, Tracer};
+
+use crate::{
+ callframe::{Callframe, FrameRemnant},
+ decommit::u256_into_address,
+ instruction::ExecutionStatus,
+ stack::StackPool,
+ state::{State, StateSnapshot},
+ world_diff::{ExternalSnapshot, Snapshot, WorldDiff},
+ ExecutionEnd, Program, World,
+};
+
+/// [`VirtualMachine`] settings.
+#[derive(Debug, Clone)]
+pub struct Settings {
+ /// Bytecode hash of the default account abstraction contract.
+ pub default_aa_code_hash: [u8; 32],
+ /// Bytecode hash of the EVM interpreter.
+ pub evm_interpreter_code_hash: [u8; 32],
+ /// Writing to this address in the bootloader's heap suspends execution.
+ pub hook_address: u32,
+}
+
+/// High-performance out-of-circuit EraVM implementation.
+#[derive(Debug)]
+pub struct VirtualMachine<T, W> {
+ pub(crate) world_diff: WorldDiff,
+ pub(crate) state: State<T, W>,
+ pub(crate) settings: Settings,
+ pub(crate) stack_pool: StackPool,
+ pub(crate) snapshot: Option<VmSnapshot>,
+}
+
+impl<T: Tracer, W: World<T>> VirtualMachine<T, W> {
+ /// Creates a new VM instance.
+ pub fn new(
+ address: H160,
+ program: Program<T, W>,
+ caller: H160,
+ calldata: &[u8],
+ gas: u32,
+ settings: Settings,
+ ) -> Self {
+ let world_diff = WorldDiff::default();
+ let world_before_this_frame = world_diff.snapshot();
+ let mut stack_pool = StackPool::default();
+
+ Self {
+ world_diff,
+ state: State::new(
+ address,
+ caller,
+ calldata,
+ gas,
+ program,
+ world_before_this_frame,
+ stack_pool.get(),
+ ),
+ settings,
+ stack_pool,
+ snapshot: None,
+ }
+ }
+
+ /// Provides a reference to the [`World`] diff accumulated by VM execution so far.
+ pub fn world_diff(&self) -> &WorldDiff {
+ &self.world_diff
+ }
+
+ /// Provides a mutable reference to the [`World`] diff accumulated by VM execution so far.
+ ///
+ /// It is unsound to mutate [`WorldDiff`] in the middle of VM execution in the general case; thus, this method should only be used in tests.
+ #[doc(hidden)]
+ pub fn world_diff_mut(&mut self) -> &mut WorldDiff {
+ &mut self.world_diff
+ }
+
+ /// Runs this VM with the specified [`World`] and [`Tracer`] until an end of execution due to a hook, or an error.
+ pub fn run(&mut self, world: &mut W, tracer: &mut T) -> ExecutionEnd {
+ unsafe {
+ loop {
+ if let ExecutionStatus::Stopped(end) =
+ ((*self.state.current_frame.pc).handler)(self, world, tracer)
+ {
+ return end;
+ }
+ }
+ }
+ }
+
+ /// Returns how much of the extra gas limit is left and the stop reason,
+ /// or `None` if the extra gas limit was exceeded.
+ ///
+ /// Needed to support the account validation gas limit.
+ /// We cannot simply reduce the available gas, as contracts might behave differently
+ /// depending on remaining gas.
+ pub fn resume_with_additional_gas_limit(
+ &mut self,
+ world: &mut W,
+ tracer: &mut T,
+ gas_limit: u32,
+ ) -> Option<(u32, ExecutionEnd)> {
+ let minimum_gas = self.state.total_unspent_gas().saturating_sub(gas_limit);
+
+ let end = unsafe {
+ loop {
+ if let ExecutionStatus::Stopped(end) =
+ ((*self.state.current_frame.pc).handler)(self, world, tracer)
+ {
+ break end;
+ }
+
+ if self.state.total_unspent_gas() < minimum_gas {
+ return None;
+ }
+ }
+ };
+
+ self.state
+ .total_unspent_gas()
+ .checked_sub(minimum_gas)
+ .map(|left| (left, end))
+ }
+
+ /// Creates a VM snapshot. The snapshot can then be rolled back to, or discarded.
+ ///
+ /// # Panics
+ ///
+ /// - Panics if called outside the initial (bootloader) callframe.
+ /// - Panics if this VM already has a snapshot.
+ pub fn make_snapshot(&mut self) {
+ assert!(self.snapshot.is_none(), "VM already has a snapshot");
+ assert!(
+ self.state.previous_frames.is_empty(),
+ "Snapshotting is only allowed in the bootloader"
+ );
+
+ self.snapshot = Some(VmSnapshot {
+ world_snapshot: self.world_diff.external_snapshot(),
+ state_snapshot: self.state.snapshot(),
+ });
+ }
+
+ /// Returns the VM to the state it was in when [`Self::make_snapshot()`] was called.
+ ///
+ /// # Panics
+ ///
+ /// - Panics if this VM doesn't hold a snapshot.
+ /// - Panics if called outside the initial (bootloader) callframe.
+ pub fn rollback(&mut self) {
+ assert!(
+ self.state.previous_frames.is_empty(),
+ "Rolling back is only allowed in the bootloader"
+ );
+
+ let snapshot = self
+ .snapshot
+ .take()
+ .expect("`rollback()` called without a snapshot");
+ self.world_diff.external_rollback(snapshot.world_snapshot);
+ self.state.rollback(snapshot.state_snapshot);
+ self.delete_history();
+ }
+
+ /// Pops a [previously made](Self::make_snapshot()) snapshot without rolling back to it. This effectively commits
+ /// all changes made up to this point, so that they cannot be rolled back.
+ ///
+ /// # Panics
+ ///
+ /// - Panics if called outside the initial (bootloader) callframe.
+ pub fn pop_snapshot(&mut self) {
+ assert!(
+ self.state.previous_frames.is_empty(),
+ "Popping a snapshot is only allowed in the bootloader"
+ );
+ self.snapshot = None;
+ self.delete_history();
+ }
+
+ /// This must only be called when it is known that the VM cannot be rolled back,
+ /// so there must not be any external snapshots and the callstack
+ /// should ideally be empty, though in practice it sometimes contains
+ /// a near call inside the bootloader.
+ fn delete_history(&mut self) {
+ self.world_diff.delete_history();
+ self.state.delete_history();
+ }
+}
+
+impl<T: Tracer, W> VirtualMachine<T, W> {
+ #[allow(clippy::too_many_arguments)]
+ pub(crate) fn push_frame<M: TypeLevelCallingMode>(
+ &mut self,
+ code_address: H160,
+ program: Program<T, W>,
+ gas: u32,
+ stipend: u32,
+ exception_handler: u16,
+ is_static: bool,
+ calldata_heap: HeapId,
+ world_before_this_frame: Snapshot,
+ ) {
+ let mut new_frame = Callframe::new(
+ if M::VALUE == CallingMode::Delegate {
+ self.state.current_frame.address
+ } else {
+ code_address
+ },
+ code_address,
+ match M::VALUE {
+ CallingMode::Normal => self.state.current_frame.address,
+ CallingMode::Delegate => self.state.current_frame.caller,
+ CallingMode::Mimic => u256_into_address(self.state.registers[15]),
+ },
+ program,
+ self.stack_pool.get(),
+ self.state.heaps.allocate(),
+ self.state.heaps.allocate(),
+ calldata_heap,
+ gas,
+ stipend,
+ exception_handler,
+ if M::VALUE == CallingMode::Delegate {
+ self.state.current_frame.context_u128
+ } else {
+ self.state.context_u128
+ },
+ is_static,
+ world_before_this_frame,
+ );
+ self.state.context_u128 = 0;
+
+ std::mem::swap(&mut new_frame, &mut self.state.current_frame);
+ self.state.previous_frames.push(new_frame);
+ }
+
+ pub(crate) fn pop_frame(&mut self, heap_to_keep: Option<HeapId>) -> Option<FrameRemnant> {
+ self.state.previous_frames.pop().map(|mut frame| {
+ for &heap in [
+ self.state.current_frame.heap,
+ self.state.current_frame.aux_heap,
+ ]
+ .iter()
+ .chain(&self.state.current_frame.heaps_i_am_keeping_alive)
+ {
+ if Some(heap) != heap_to_keep {
+ self.state.heaps.deallocate(heap);
+ }
+ }
+
+ std::mem::swap(&mut self.state.current_frame, &mut frame);
+ let Callframe {
+ exception_handler,
+ world_before_this_frame,
+ stack,
+ ..
+ } = frame;
+
+ self.stack_pool.recycle(stack);
+
+ self.state
+ .current_frame
+ .heaps_i_am_keeping_alive
+ .extend(heap_to_keep);
+
+ FrameRemnant {
+ exception_handler,
+ snapshot: world_before_this_frame,
+ }
+ })
+ }
+
+ pub(crate) fn start_new_tx(&mut self) {
+ self.state.transaction_number = self.state.transaction_number.wrapping_add(1);
+ self.world_diff.clear_transient_storage();
+ }
+}
+
+impl<T: fmt::Debug, W: fmt::Debug> VirtualMachine<T, W> {
+ /// Dumps an opaque representation of the current VM state.
+ #[doc(hidden)] // should only be used in tests
+ pub fn dump_state(&self) -> impl PartialEq + fmt::Debug {
+ self.state.clone()
+ }
+}
+
+/// Snapshot of a [`VirtualMachine`].
+#[derive(Debug)]
+pub(crate) struct VmSnapshot {
+ world_snapshot: ExternalSnapshot,
+ state_snapshot: StateSnapshot,
+}
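+
+// A minimal sketch of the snapshot lifecycle documented above, reusing the
+// setup of the other tests in this crate (`TestWorld`, an invalid-instruction
+// program, and `()` as a no-op tracer).
+#[cfg(all(test, not(feature = "single_instruction_test")))]
+mod tests {
+ use primitive_types::U256;
+ use zkevm_opcode_defs::ethereum_types::Address;
+
+ use crate::{
+ testonly::{initial_decommit, TestWorld},
+ Instruction, Program, Settings, VirtualMachine,
+ };
+
+ #[test]
+ fn rollback_restores_dumped_state() {
+ let program = Program::from_raw(vec![Instruction::from_invalid()], vec![]);
+ let address = Address::from_low_u64_be(0xabcd);
+ let mut world = TestWorld::new(&[(address, program)]);
+ let program = initial_decommit(&mut world, address);
+
+ let mut vm: VirtualMachine<(), TestWorld<()>> = VirtualMachine::new(
+ address,
+ program,
+ Address::zero(),
+ &[],
+ 1000,
+ Settings {
+ default_aa_code_hash: [0; 32],
+ evm_interpreter_code_hash: [0; 32],
+ hook_address: 0,
+ },
+ );
+
+ let state_before = vm.dump_state();
+ vm.make_snapshot();
+ vm.state.registers[2] = U256::one();
+ vm.rollback();
+ assert!(state_before == vm.dump_state());
+ }
+}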
+
+use std::collections::BTreeMap;
+
+use primitive_types::{H160, U256};
+use zkevm_opcode_defs::system_params::{
+ STORAGE_ACCESS_COLD_READ_COST, STORAGE_ACCESS_COLD_WRITE_COST, STORAGE_ACCESS_WARM_READ_COST,
+ STORAGE_ACCESS_WARM_WRITE_COST,
+};
+use zksync_vm2_interface::{CycleStats, Event, L2ToL1Log, Tracer};
+
+use crate::{
+ rollback::{Rollback, RollbackableLog, RollbackableMap, RollbackablePod, RollbackableSet},
+ StorageInterface,
+};
+
+/// Pending modifications to the global state that are executed at the end of a block.
+/// In other words, side effects.
+#[derive(Debug, Default)]
+pub struct WorldDiff {
+ // These are rolled back on revert or panic (and when the whole VM is rolled back).
+ storage_changes: RollbackableMap<(H160, U256), U256>,
+ paid_changes: RollbackableMap<(H160, U256), u32>,
+ transient_storage_changes: RollbackableMap<(H160, U256), U256>,
+ events: RollbackableLog<Event>,
+ l2_to_l1_logs: RollbackableLog<L2ToL1Log>,
+ pub(crate) pubdata: RollbackablePod<i32>,
+ storage_refunds: RollbackableLog<u32>,
+ pubdata_costs: RollbackableLog<i32>,
+
+ // The fields below are only rolled back when the whole VM is rolled back.
+ /// Values indicate whether a bytecode was successfully decommitted. When accessing decommitted hashes
+ /// for the execution state, we need to track both successful and failed decommitments; OTOH, only successful ones
+ /// matter when computing decommitment cost.
+ pub(crate) decommitted_hashes: RollbackableMap<U256, bool>,
+ read_storage_slots: RollbackableSet<(H160, U256)>,
+ written_storage_slots: RollbackableSet<(H160, U256)>,
+
+ // This is never rolled back. It is just a cache to avoid querying the DB for these values every time.
+ storage_initial_values: BTreeMap<(H160, U256), Option<U256>>,
+}
+
+#[derive(Debug)]
+pub(crate) struct ExternalSnapshot {
+ internal_snapshot: Snapshot,
+ pub(crate) decommitted_hashes: <RollbackableMap<U256, ()> as Rollback>::Snapshot,
+ read_storage_slots: <RollbackableMap<(H160, U256), ()> as Rollback>::Snapshot,
+ written_storage_slots: <RollbackableMap<(H160, U256), ()> as Rollback>::Snapshot,
+ storage_refunds: <RollbackableLog<u32> as Rollback>::Snapshot,
+ pubdata_costs: <RollbackableLog<i32> as Rollback>::Snapshot,
+}
+
+impl WorldDiff {
+ /// Returns the storage slot's value and a refund based on its hot/cold status.
+ pub(crate) fn read_storage(
+ &mut self,
+ world: &mut impl StorageInterface,
+ tracer: &mut impl Tracer,
+ contract: H160,
+ key: U256,
+ ) -> (U256, u32) {
+ let (value, refund) = self.read_storage_inner(world, tracer, contract, key);
+ self.storage_refunds.push(refund);
+ (value, refund)
+ }
+
+ /// Same as [`Self::read_storage()`], but without recording the refund value (which is important
+ /// because the storage is read not only from the `sload` op handler, but also from the `farcall` op handler;
+ /// the latter must not record a refund as per previous VM versions).
+ pub(crate) fn read_storage_without_refund(
+ &mut self,
+ world: &mut impl StorageInterface,
+ tracer: &mut impl Tracer,
+ contract: H160,
+ key: U256,
+ ) -> U256 {
+ self.read_storage_inner(world, tracer, contract, key).0
+ }
+
+ fn read_storage_inner(
+ &mut self,
+ world: &mut impl StorageInterface,
+ tracer: &mut impl Tracer,
+ contract: H160,
+ key: U256,
+ ) -> (U256, u32) {
+ let value = self
+ .storage_changes
+ .as_ref()
+ .get(&(contract, key))
+ .copied()
+ .unwrap_or_else(|| world.read_storage(contract, key).unwrap_or_default());
+
+ let newly_added = self.read_storage_slots.add((contract, key));
+ if newly_added {
+ tracer.on_extra_prover_cycles(CycleStats::StorageRead);
+ }
+
+ let refund = if !newly_added || world.is_free_storage_slot(&contract, &key) {
+ WARM_READ_REFUND
+ } else {
+ 0
+ };
+ self.pubdata_costs.push(0);
+ (value, refund)
+ }
+
+ /// Returns the refund based on the hot/cold status of the storage slot and the change in pubdata.
+ pub(crate) fn write_storage(
+ &mut self,
+ world: &mut impl StorageInterface,
+ tracer: &mut impl Tracer,
+ contract: H160,
+ key: U256,
+ value: U256,
+ ) -> u32 {
+ self.storage_changes.insert((contract, key), value);
+
+ let initial_value = self
+ .storage_initial_values
+ .entry((contract, key))
+ .or_insert_with(|| world.read_storage(contract, key));
+
+ if world.is_free_storage_slot(&contract, &key) {
+ if self.written_storage_slots.add((contract, key)) {
+ tracer.on_extra_prover_cycles(CycleStats::StorageWrite);
+ }
+ self.read_storage_slots.add((contract, key));
+
+ self.storage_refunds.push(WARM_WRITE_REFUND);
+ self.pubdata_costs.push(0);
+ return WARM_WRITE_REFUND;
+ }
+
+ let update_cost = world.cost_of_writing_storage(*initial_value, value);
+ let prepaid = self
+ .paid_changes
+ .insert((contract, key), update_cost)
+ .unwrap_or(0);
+
+ let refund = if self.written_storage_slots.add((contract, key)) {
+ tracer.on_extra_prover_cycles(CycleStats::StorageWrite);
+
+ if self.read_storage_slots.add((contract, key)) {
+ 0
+ } else {
+ COLD_WRITE_AFTER_WARM_READ_REFUND
+ }
+ } else {
+ WARM_WRITE_REFUND
+ };
+
+ #[allow(clippy::cast_possible_wrap)]
+ {
+ let pubdata_cost = (update_cost as i32) - (prepaid as i32);
+ self.pubdata.0 += pubdata_cost;
+ self.storage_refunds.push(refund);
+ self.pubdata_costs.push(pubdata_cost);
+ }
+ refund
+ }
+
+ pub(crate) fn pubdata(&self) -> i32 {
+ self.pubdata.0
+ }
+
+ /// Returns recorded refunds for all storage operations.
+ pub fn storage_refunds(&self) -> &[u32] {
+ self.storage_refunds.as_ref()
+ }
+
+ /// Returns recorded pubdata costs for all storage operations.
+ pub fn pubdata_costs(&self) -> &[i32] {
+ self.pubdata_costs.as_ref()
+ }
+
+ #[doc(hidden)] // duplicates `StateInterface::get_storage_state()`, but we use random access in some places
+ pub fn get_storage_state(&self) -> &BTreeMap<(H160, U256), U256> {
+ self.storage_changes.as_ref()
+ }
+
+ /// Gets changes for all touched storage slots.
+ pub fn get_storage_changes(
+ &self,
+ ) -> impl Iterator<Item = ((H160, U256), (Option<U256>, U256))> + '_ {
+ self.storage_changes
+ .as_ref()
+ .iter()
+ .filter_map(|(key, &value)| {
+ if self.storage_initial_values[key].unwrap_or_default() == value {
+ None
+ } else {
+ Some((*key, (self.storage_initial_values[key], value)))
+ }
+ })
+ }
+
+ /// Gets changes for storage slots touched after the specified `snapshot` was created.
+ pub fn get_storage_changes_after(
+ &self,
+ snapshot: &Snapshot,
+ ) -> impl Iterator<Item = ((H160, U256), StorageChange)> + '_ {
+ self.storage_changes
+ .changes_after(snapshot.storage_changes)
+ .into_iter()
+ .map(|(key, (before, after))| {
+ let initial = self.storage_initial_values[&key];
+ (
+ key,
+ StorageChange {
+ before: before.or(initial),
+ after,
+ is_initial: initial.is_none(),
+ },
+ )
+ })
+ }
+
+ pub(crate) fn read_transient_storage(&mut self, contract: H160, key: U256) -> U256 {
+ self.pubdata_costs.push(0);
+ self.transient_storage_changes
+ .as_ref()
+ .get(&(contract, key))
+ .copied()
+ .unwrap_or_default()
+ }
+
+ pub(crate) fn write_transient_storage(&mut self, contract: H160, key: U256, value: U256) {
+ self.pubdata_costs.push(0);
+ self.transient_storage_changes
+ .insert((contract, key), value);
+ }
+
+ pub(crate) fn get_transient_storage_state(&self) -> &BTreeMap<(H160, U256), U256> {
+ self.transient_storage_changes.as_ref()
+ }
+
+ pub(crate) fn record_event(&mut self, event: Event) {
+ self.events.push(event);
+ }
+
+ pub(crate) fn events(&self) -> &[Event] {
+ self.events.as_ref()
+ }
+
+ /// Returns events emitted after the specified `snapshot` was created.
+ pub fn events_after(&self, snapshot: &Snapshot) -> &[Event] {
+ self.events.logs_after(snapshot.events)
+ }
+
+ pub(crate) fn record_l2_to_l1_log(&mut self, log: L2ToL1Log) {
+ self.l2_to_l1_logs.push(log);
+ }
+
+ pub(crate) fn l2_to_l1_logs(&self) -> &[L2ToL1Log] {
+ self.l2_to_l1_logs.as_ref()
+ }
+
+ /// Returns L2-to-L1 logs emitted after the specified `snapshot` was created.
+ pub fn l2_to_l1_logs_after(&self, snapshot: &Snapshot) -> &[L2ToL1Log] {
+ self.l2_to_l1_logs.logs_after(snapshot.l2_to_l1_logs)
+ }
+
+ /// Returns hashes of decommitted contract bytecodes in no particular order. Note that this includes
+ /// failed (out-of-gas) decommitments.
+ pub fn decommitted_hashes(&self) -> impl Iterator<Item = U256> + '_ {
+ self.decommitted_hashes.as_ref().keys().copied()
+ }
+
+ /// Get a snapshot for selecting which logs & co. to output using [`Self::events_after()`] and other methods.
+ pub fn snapshot(&self) -> Snapshot {
+ Snapshot {
+ storage_changes: self.storage_changes.snapshot(),
+ paid_changes: self.paid_changes.snapshot(),
+ events: self.events.snapshot(),
+ l2_to_l1_logs: self.l2_to_l1_logs.snapshot(),
+ transient_storage_changes: self.transient_storage_changes.snapshot(),
+ pubdata: self.pubdata.snapshot(),
+ }
+ }
+
+ #[allow(clippy::needless_pass_by_value)] // intentional: we require a snapshot to be rolled back to no more than once
+ pub(crate) fn rollback(&mut self, snapshot: Snapshot) {
+ self.storage_changes.rollback(snapshot.storage_changes);
+ self.paid_changes.rollback(snapshot.paid_changes);
+ self.events.rollback(snapshot.events);
+ self.l2_to_l1_logs.rollback(snapshot.l2_to_l1_logs);
+ self.transient_storage_changes
+ .rollback(snapshot.transient_storage_changes);
+ self.pubdata.rollback(snapshot.pubdata);
+ }
+
+ /// This function must only be called during the initial frame
+ /// because otherwise internal rollbacks can roll back past the external snapshot.
+ pub(crate) fn external_snapshot(&self) -> ExternalSnapshot {
+ // Rolling back to this snapshot will clear transient storage even though it is not empty
+ // after a transaction. This is ok because the next instruction in the bootloader
+ // (IncrementTxNumber) clears the transient storage anyway.
+ // This is necessary because clear_transient_storage cannot be undone.
+ ExternalSnapshot {
+ internal_snapshot: Snapshot {
+ transient_storage_changes: 0,
+ ..self.snapshot()
+ },
+ decommitted_hashes: self.decommitted_hashes.snapshot(),
+ read_storage_slots: self.read_storage_slots.snapshot(),
+ written_storage_slots: self.written_storage_slots.snapshot(),
+ storage_refunds: self.storage_refunds.snapshot(),
+ pubdata_costs: self.pubdata_costs.snapshot(),
+ }
+ }
+
+ pub(crate) fn external_rollback(&mut self, snapshot: ExternalSnapshot) {
+ self.rollback(snapshot.internal_snapshot);
+ self.storage_refunds.rollback(snapshot.storage_refunds);
+ self.pubdata_costs.rollback(snapshot.pubdata_costs);
+ self.decommitted_hashes
+ .rollback(snapshot.decommitted_hashes);
+ self.read_storage_slots
+ .rollback(snapshot.read_storage_slots);
+ self.written_storage_slots
+ .rollback(snapshot.written_storage_slots);
+ }
+
+ pub(crate) fn delete_history(&mut self) {
+ self.storage_changes.delete_history();
+ self.paid_changes.delete_history();
+ self.transient_storage_changes.delete_history();
+ self.events.delete_history();
+ self.l2_to_l1_logs.delete_history();
+ self.pubdata.delete_history();
+ self.storage_refunds.delete_history();
+ self.pubdata_costs.delete_history();
+ self.decommitted_hashes.delete_history();
+ self.read_storage_slots.delete_history();
+ self.written_storage_slots.delete_history();
+ }
+
+ pub(crate) fn clear_transient_storage(&mut self) {
+ self.transient_storage_changes = RollbackableMap::default();
+ }
+}
+
+/// Opaque snapshot of a [`WorldDiff`] output by its [eponymous method](WorldDiff::snapshot()).
+/// Can be provided to [`WorldDiff::events_after()`] etc. to get data after the snapshot was created.
+#[derive(Clone, PartialEq, Debug)]
+pub struct Snapshot {
+ storage_changes: <RollbackableMap<(H160, U256), U256> as Rollback>::Snapshot,
+ paid_changes: <RollbackableMap<(H160, U256), u32> as Rollback>::Snapshot,
+ events: <RollbackableLog<Event> as Rollback>::Snapshot,
+ l2_to_l1_logs: <RollbackableLog<L2ToL1Log> as Rollback>::Snapshot,
+ transient_storage_changes: <RollbackableMap<(H160, U256), U256> as Rollback>::Snapshot,
+ pubdata: <RollbackablePod<i32> as Rollback>::Snapshot,
+}
+
+/// Change in a single storage slot.
+#[derive(Debug, PartialEq)]
+pub struct StorageChange {
+ /// Value before the slot was written to. `None` if the slot was not written to previously.
+ pub before: Option<U256>,
+ /// Value written to the slot.
+ pub after: U256,
+ /// `true` if the slot is not set in the [`World`](crate::World).
+ /// A write may be initial even if it isn't the first write to a slot!
+ pub is_initial: bool,
+}
+
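+// Refund amounts are derived from the storage access cost constants: e.g., a warm read
+// refunds the difference between the cold and warm read costs.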
+const WARM_READ_REFUND: u32 = STORAGE_ACCESS_COLD_READ_COST - STORAGE_ACCESS_WARM_READ_COST;
+const WARM_WRITE_REFUND: u32 = STORAGE_ACCESS_COLD_WRITE_COST - STORAGE_ACCESS_WARM_WRITE_COST;
+const COLD_WRITE_AFTER_WARM_READ_REFUND: u32 = STORAGE_ACCESS_COLD_READ_COST;
+
+#[cfg(test)]
+mod tests {
+ use proptest::prelude::*;
+
+ use super::*;
+
+ proptest! {
+ #[test]
+ fn test_storage_changes(
+ initial_values in arbitrary_storage_changes(),
+ first_changes in arbitrary_storage_changes(),
+ second_changes in arbitrary_storage_changes(),
+ ) {
+ let storage_initial_values = initial_values
+ .iter()
+ .map(|(key, value)| (*key, Some(*value)))
+ .collect();
+ let mut world_diff = WorldDiff {
+ storage_initial_values,
+ ..WorldDiff::default()
+ };
+
+ let checkpoint1 = world_diff.snapshot();
+ for (key, value) in &first_changes {
+ world_diff.write_storage(&mut NoWorld, &mut (), key.0, key.1, *value);
+ }
+ assert_eq!(
+ world_diff
+ .get_storage_changes_after(&checkpoint1)
+ .collect::<BTreeMap<_, _>>(),
+ first_changes
+ .iter()
+ .map(|(key, value)| (
+ *key,
+ StorageChange {
+ before: initial_values.get(key).copied(),
+ after: *value,
+ is_initial: !initial_values.contains_key(key),
+ }
+ ))
+ .collect()
+ );
+
+ let checkpoint2 = world_diff.snapshot();
+ for (key, value) in &second_changes {
+ world_diff.write_storage(&mut NoWorld, &mut (), key.0, key.1, *value);
+ }
+ assert_eq!(
+ world_diff
+ .get_storage_changes_after(&checkpoint2)
+ .collect::<BTreeMap<_, _>>(),
+ second_changes
+ .iter()
+ .map(|(key, value)| (
+ *key,
+ StorageChange {
+ before: first_changes.get(key).or(initial_values.get(key)).copied(),
+ after: *value,
+ is_initial: !initial_values.contains_key(key),
+ }
+ ))
+ .collect()
+ );
+
+ let mut combined = first_changes
+ .into_iter()
+ .filter_map(|(key, value)| {
+ let initial = initial_values.get(&key).copied();
+ (initial.unwrap_or_default() != value).then_some((key, (initial, value)))
+ })
+ .collect::<BTreeMap<_, _>>();
+ for (key, value) in second_changes {
+ let initial = initial_values.get(&key).copied();
+ if initial.unwrap_or_default() == value {
+ combined.remove(&key);
+ } else {
+ combined.insert(key, (initial, value));
+ }
+ }
+
+ assert_eq!(combined, world_diff.get_storage_changes().collect());
+ }
+ }
+
+ fn arbitrary_storage_changes() -> impl Strategy<Value = BTreeMap<(H160, U256), U256>> {
+ any::<Vec<(([u8; 20], [u8; 32]), [u8; 32])>>().prop_map(|vec| {
+ vec.into_iter()
+ .map(|((contract, key), value)| {
+ ((H160::from(contract), U256::from(key)), U256::from(value))
+ })
+ .collect()
+ })
+ }
+
+ struct NoWorld;
+ impl StorageInterface for NoWorld {
+ fn read_storage(&mut self, _: H160, _: U256) -> Option<U256> {
+ None
+ }
+
+ fn cost_of_writing_storage(&mut self, _: Option<U256>, _: U256) -> u32 {
+ 0
+ }
+
+ fn is_free_storage_slot(&self, _: &H160, _: &U256) -> bool {
+ false
+ }
+ }
+}
+
//! # EraVM Stable Interface
+//!
+//! This crate defines an interface for tracers that will never change but may be extended.
+//! To be precise, a tracer using this interface will work with any VM written against this
+//! version or a newer one. Updating the tracer to depend on a newer interface version is
+//! not necessary. In fact, tracers should depend on the oldest version that has the required
+//! features.
+//!
+//! A struct implementing [`Tracer`] may read and mutate the VM's state via [`StateInterface`]
+//! when particular opcodes are executed.
+//!
+//! ## Why is extreme backwards compatibility required here?
+//!
+//! Suppose VM1 uses stable interface version 1 and VM2 uses stable interface version 2.
+//! With any sane design it would be trivial to take a tracer written for version 1 and
+//! update it to work with version 2. However, then it can no longer be used with VM1.
+//!
+//! This exact thing caused us a lot of trouble when we put many versions of `zk_evm` in `multivm`.
+//!
+//! ## How do I add a new feature to the interface?
+//!
+//! Do not change the existing traits. In fact, in the new version that you publish, you should
+//! delete the existing code and import it from the previous version instead.
+//!
+//! This is how you would add a new method to [`StateInterface`] and a new opcode.
+//!
+//! ```
+//! # use zksync_vm2_interface as zksync_vm2_interface_v1;
+//! use zksync_vm2_interface_v1::{
+//! StateInterface as StateInterfaceV1, Tracer as TracerV1, opcodes::NearCall,
+//! };
+//!
+//! trait StateInterface: StateInterfaceV1 {
+//! fn get_some_new_field(&self) -> u32;
+//! }
+//!
+//! pub struct NewOpcode;
+//!
+//! #[derive(PartialEq, Eq)]
+//! enum Opcode {
+//! NewOpcode,
+//! NearCall,
+//! // ...
+//! }
+//!
+//! trait OpcodeType {
+//! const VALUE: Opcode;
+//! }
+//!
+//! impl OpcodeType for NewOpcode {
+//! const VALUE: Opcode = Opcode::NewOpcode;
+//! }
+//!
+//! // Do this for every old opcode
+//! impl OpcodeType for NearCall {
+//! const VALUE: Opcode = Opcode::NearCall;
+//! }
+//!
+//! trait Tracer {
+//! fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) {}
+//! fn after_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) {}
+//! }
+//!
+//! impl<T: TracerV1> Tracer for T {
+//! fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, state: &mut S) {
+//! match OP::VALUE {
+//! Opcode::NewOpcode => {}
+//! // Do this for every old opcode
+//! Opcode::NearCall => {
+//! <Self as TracerV1>::before_instruction::<NearCall, _>(self, state)
+//! }
+//! }
+//! }
+//! fn after_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) {}
+//! }
+//!
+//! // Now you can use the new features by implementing TracerV2
+//! struct MyTracer;
+//! impl Tracer for MyTracer {
+//! fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, state: &mut S) {
+//! if OP::VALUE == Opcode::NewOpcode {
+//! state.get_some_new_field();
+//! }
+//! }
+//! }
+//! ```
+
+pub use self::{state_interface::*, tracer_interface::*};
+
+mod state_interface;
+mod tracer_interface;
+
use primitive_types::{H160, U256};
+
+/// Public interface of the VM state. Encompasses both read and write methods.
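+///
+/// # Examples
+///
+/// A sketch of reading VM state from inside a tracer hook (the `inspect` helper itself is
+/// hypothetical, not part of this crate):
+///
+/// ```ignore
+/// fn inspect<S: StateInterface>(state: &mut S) {
+///     // Registers carry a value together with a pointer flag.
+///     let (value, is_pointer) = state.read_register(1);
+///     // Execution flags and the pubdata counter are plain getters.
+///     let flags = state.flags();
+///     let pubdata = state.pubdata();
+/// }
+/// ```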
+pub trait StateInterface {
+ /// Reads a register with the specified zero-based index. Returns a value together with a pointer flag.
+ fn read_register(&self, register: u8) -> (U256, bool);
+ /// Sets a register with the specified zero-based index.
+ fn set_register(&mut self, register: u8, value: U256, is_pointer: bool);
+
+ /// Returns a mutable handle to the current call frame.
+ fn current_frame(&mut self) -> impl CallframeInterface + '_;
+ /// Returns the total number of call frames.
+ fn number_of_callframes(&self) -> usize;
+ /// Returns a mutable handle to a call frame with the specified index, where
+ /// zero is the current frame, one is the frame before that, and so on.
+ fn callframe(&mut self, n: usize) -> impl CallframeInterface + '_;
+
+ /// Reads a single byte from the specified heap at the specified 0-based offset.
+ fn read_heap_byte(&self, heap: HeapId, offset: u32) -> u8;
+ /// Reads an entire `U256` word in big-endian order from the specified heap / `offset`
+ /// (which is the index of the most significant byte of the read value).
+ fn read_heap_u256(&self, heap: HeapId, offset: u32) -> U256;
+ /// Writes an entire `U256` word in big-endian order to the specified heap at the specified `offset`
+ /// (which is the index of the most significant byte of the written value).
+ fn write_heap_u256(&mut self, heap: HeapId, offset: u32, value: U256);
+
+ /// Returns current execution flags.
+ fn flags(&self) -> Flags;
+ /// Sets current execution flags.
+ fn set_flags(&mut self, flags: Flags);
+
+ /// Returns the currently set 0-based transaction number.
+ fn transaction_number(&self) -> u16;
+ /// Sets the current transaction number.
+ fn set_transaction_number(&mut self, value: u16);
+
+ /// Returns the value of the context register.
+ fn context_u128_register(&self) -> u128;
+ /// Sets the value of the context register.
+ fn set_context_u128_register(&mut self, value: u128);
+
+ /// Iterates over storage slots read or written during VM execution.
+ fn get_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)>;
+ /// Iterates over all transient storage slots set during VM execution.
+ fn get_transient_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)>;
+ /// Gets value of the specified transient storage slot.
+ fn get_transient_storage(&self, address: H160, slot: U256) -> U256;
+ /// Sets value of the specified transient storage slot.
+ fn write_transient_storage(&mut self, address: H160, slot: U256, value: U256);
+
+ /// Iterates over events emitted during VM execution.
+ fn events(&self) -> impl Iterator<Item = Event>;
+ /// Iterates over L2-to-L1 logs emitted during VM execution.
+ fn l2_to_l1_logs(&self) -> impl Iterator<Item = L2ToL1Log>;
+
+ /// Gets the current amount of published pubdata.
+ fn pubdata(&self) -> i32;
+ /// Sets the current amount of published pubdata.
+ fn set_pubdata(&mut self, value: i32);
+}
+
+/// VM execution flags. See the EraVM reference for more details.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Flags {
+ /// "Less than" flag.
+ pub less_than: bool,
+ /// "Equal" flag.
+ pub equal: bool,
+ /// "Greater than" flag.
+ pub greater: bool,
+}
+
+/// Public interface of an EraVM call frame.
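+///
+/// A sketch of walking all frames through [`StateInterface`] (the `total_gas` helper is
+/// hypothetical):
+///
+/// ```ignore
+/// fn total_gas<S: StateInterface>(state: &mut S) -> u64 {
+///     (0..state.number_of_callframes())
+///         .map(|n| u64::from(state.callframe(n).gas()))
+///         .sum()
+/// }
+/// ```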
+pub trait CallframeInterface {
+ /// Address of the storage context associated with this frame. For delegate calls, this address is inherited from the calling contract;
+ /// otherwise, it's the same as [`Self::code_address()`].
+ fn address(&self) -> H160;
+ /// Sets the address of the executing contract.
+ fn set_address(&mut self, address: H160);
+ /// Address of the contract being executed.
+ fn code_address(&self) -> H160;
+ /// Sets the address of the contract being executed. This does not cause the contract at the specified address to be loaded per se; it just updates
+ /// the value used internally by the VM (e.g., returned by the [`CodeAddress`](crate::opcodes::CodeAddress) opcode).
+ fn set_code_address(&mut self, address: H160);
+ /// Address of the calling contract. Respects delegate and mimic calls.
+ fn caller(&self) -> H160;
+ /// Sets the address of the calling contract.
+ fn set_caller(&mut self, address: H160);
+
+ /// Returns the current program counter (i.e., 0-based index of the instruction being executed).
+ /// During a panic, this returns `None`.
+ fn program_counter(&self) -> Option<u16>;
+ /// Sets the program counter.
+ /// The VM will execute an invalid instruction if you jump out of the program.
+ fn set_program_counter(&mut self, value: u16);
+
+ /// Returns the program counter that the parent frame should continue from if this frame fails.
+ fn exception_handler(&self) -> u16;
+ /// Sets the exception handler as specified [above](Self::exception_handler()).
+ fn set_exception_handler(&mut self, value: u16);
+
+ /// Checks whether the call is static.
+ fn is_static(&self) -> bool;
+ /// Checks whether the call is executed in kernel mode.
+ fn is_kernel(&self) -> bool;
+
+ /// Returns the remaining amount of gas.
+ fn gas(&self) -> u32;
+ /// Sets the remaining amount of gas.
+ fn set_gas(&mut self, new_gas: u32);
+ /// Additional gas provided for the duration of this callframe.
+ fn stipend(&self) -> u32;
+
+ /// Returns the context value for this call. This context is accessible via [`ContextU128`](crate::opcodes::ContextU128) opcode.
+ fn context_u128(&self) -> u128;
+ /// Sets the context value for this call.
+ fn set_context_u128(&mut self, value: u128);
+
+ /// Checks whether this frame corresponds to a near call.
+ fn is_near_call(&self) -> bool;
+
+ /// Reads the specified stack slot. Returns a value together with a pointer flag.
+ fn read_stack(&self, index: u16) -> (U256, bool);
+ /// Sets the value and pointer flag for the specified stack slot.
+ fn write_stack(&mut self, index: u16, value: U256, is_pointer: bool);
+
+ /// Returns the stack pointer.
+ fn stack_pointer(&self) -> u16;
+ /// Sets the stack pointer.
+ fn set_stack_pointer(&mut self, value: u16);
+
+ /// Returns ID of the main heap used in this call.
+ fn heap(&self) -> HeapId;
+ /// Returns the main heap boundary (number of paid bytes).
+ fn heap_bound(&self) -> u32;
+ /// Sets the main heap boundary.
+ fn set_heap_bound(&mut self, value: u32);
+
+ /// Returns ID of the auxiliary heap used in this call.
+ fn aux_heap(&self) -> HeapId;
+ /// Returns the auxiliary heap boundary (number of paid bytes).
+ fn aux_heap_bound(&self) -> u32;
+ /// Sets the auxiliary heap boundary.
+ fn set_aux_heap_bound(&mut self, value: u32);
+
+ /// Reads a word from the bytecode of the executing contract.
+ fn read_contract_code(&self, slot: u16) -> U256;
+}
+
+/// Identifier of a VM heap.
+///
+/// EraVM docs sometimes refer to heaps as *heap pages*; docs in this crate don't, to avoid confusion with the internal heap structure.
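+///
+/// The predefined IDs below are thin wrappers around small integers, so the following holds:
+///
+/// ```
+/// # use zksync_vm2_interface::HeapId;
+/// assert_eq!(HeapId::FIRST.as_u32(), 2);
+/// ```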
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct HeapId(u32);
+
+impl HeapId {
+ /// Identifier of the calldata heap used by the first executed program (i.e., the bootloader).
+ pub const FIRST_CALLDATA: Self = Self(1);
+ /// Identifier of the heap used by the first executed program (i.e., the bootloader).
+ pub const FIRST: Self = Self(2);
+ /// Identifier of the auxiliary heap used by the first executed program (i.e., the bootloader).
+ pub const FIRST_AUX: Self = Self(3);
+
+ /// Only for dealing with external data structures, never use internally.
+ #[doc(hidden)]
+ pub const fn from_u32_unchecked(value: u32) -> Self {
+ Self(value)
+ }
+
+ /// Converts this ID to an integer value.
+ pub const fn as_u32(self) -> u32 {
+ self.0
+ }
+}
+
+/// Event emitted by EraVM.
+///
+/// There is no address field because nobody is interested in events that don't come
+/// from the event writer, so we simply do not record events coming from anywhere else.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct Event {
+ /// Event key.
+ pub key: U256,
+ /// Event value.
+ pub value: U256,
+ /// Is this event first in a chain of events?
+ pub is_first: bool,
+ /// Shard identifier (currently, always set to 0).
+ pub shard_id: u8,
+ /// 0-based index of a transaction that has emitted this event.
+ pub tx_number: u16,
+}
+
+/// L2-to-L1 log emitted by EraVM.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct L2ToL1Log {
+ /// Log key.
+ pub key: U256,
+ /// Log value.
+ pub value: U256,
+ /// Is this a service log?
+ pub is_service: bool,
+ /// Address of the contract that has emitted this log.
+ pub address: H160,
+ /// Shard identifier (currently, always set to 0).
+ pub shard_id: u8,
+ /// 0-based index of a transaction that has emitted this log.
+ pub tx_number: u16,
+}
+
+#[cfg(test)]
+#[derive(Debug)]
+pub struct DummyState;
+
+#[cfg(test)]
+impl StateInterface for DummyState {
+ fn read_register(&self, _: u8) -> (U256, bool) {
+ unimplemented!()
+ }
+
+ fn set_register(&mut self, _: u8, _: U256, _: bool) {
+ unimplemented!()
+ }
+
+ fn current_frame(&mut self) -> impl CallframeInterface + '_ {
+ DummyState
+ }
+
+ fn number_of_callframes(&self) -> usize {
+ unimplemented!()
+ }
+
+ fn callframe(&mut self, _: usize) -> impl CallframeInterface + '_ {
+ DummyState
+ }
+
+ fn read_heap_byte(&self, _: HeapId, _: u32) -> u8 {
+ unimplemented!()
+ }
+
+ fn read_heap_u256(&self, _: HeapId, _: u32) -> U256 {
+ unimplemented!()
+ }
+
+ fn write_heap_u256(&mut self, _: HeapId, _: u32, _: U256) {
+ unimplemented!()
+ }
+
+ fn flags(&self) -> Flags {
+ unimplemented!()
+ }
+
+ fn set_flags(&mut self, _: Flags) {
+ unimplemented!()
+ }
+
+ fn transaction_number(&self) -> u16 {
+ unimplemented!()
+ }
+
+ fn set_transaction_number(&mut self, _: u16) {
+ unimplemented!()
+ }
+
+ fn context_u128_register(&self) -> u128 {
+ unimplemented!()
+ }
+
+ fn set_context_u128_register(&mut self, _: u128) {
+ unimplemented!()
+ }
+
+ fn get_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)> {
+ std::iter::empty()
+ }
+
+ fn get_transient_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)> {
+ std::iter::empty()
+ }
+
+ fn get_transient_storage(&self, _: H160, _: U256) -> U256 {
+ unimplemented!()
+ }
+
+ fn write_transient_storage(&mut self, _: H160, _: U256, _: U256) {
+ unimplemented!()
+ }
+
+ fn events(&self) -> impl Iterator<Item = Event> {
+ std::iter::empty()
+ }
+
+ fn l2_to_l1_logs(&self) -> impl Iterator<Item = L2ToL1Log> {
+ std::iter::empty()
+ }
+
+ fn pubdata(&self) -> i32 {
+ unimplemented!()
+ }
+
+ fn set_pubdata(&mut self, _: i32) {
+ unimplemented!()
+ }
+}
+
+#[cfg(test)]
+impl CallframeInterface for DummyState {
+ fn address(&self) -> H160 {
+ unimplemented!()
+ }
+
+ fn set_address(&mut self, _: H160) {
+ unimplemented!()
+ }
+
+ fn code_address(&self) -> H160 {
+ unimplemented!()
+ }
+
+ fn set_code_address(&mut self, _: H160) {
+ unimplemented!()
+ }
+
+ fn caller(&self) -> H160 {
+ unimplemented!()
+ }
+
+ fn set_caller(&mut self, _: H160) {
+ unimplemented!()
+ }
+
+ fn program_counter(&self) -> Option<u16> {
+ unimplemented!()
+ }
+
+ fn set_program_counter(&mut self, _: u16) {
+ unimplemented!()
+ }
+
+ fn exception_handler(&self) -> u16 {
+ unimplemented!()
+ }
+
+ fn set_exception_handler(&mut self, _: u16) {
+ unimplemented!()
+ }
+
+ fn is_static(&self) -> bool {
+ unimplemented!()
+ }
+
+ fn is_kernel(&self) -> bool {
+ unimplemented!()
+ }
+
+ fn gas(&self) -> u32 {
+ unimplemented!()
+ }
+
+ fn set_gas(&mut self, _: u32) {
+ unimplemented!()
+ }
+
+ fn stipend(&self) -> u32 {
+ unimplemented!()
+ }
+
+ fn context_u128(&self) -> u128 {
+ unimplemented!()
+ }
+
+ fn set_context_u128(&mut self, _: u128) {
+ unimplemented!()
+ }
+
+ fn is_near_call(&self) -> bool {
+ unimplemented!()
+ }
+
+ fn read_stack(&self, _: u16) -> (U256, bool) {
+ unimplemented!()
+ }
+
+ fn write_stack(&mut self, _: u16, _: U256, _: bool) {
+ unimplemented!()
+ }
+
+ fn stack_pointer(&self) -> u16 {
+ unimplemented!()
+ }
+
+ fn set_stack_pointer(&mut self, _: u16) {
+ unimplemented!()
+ }
+
+ fn heap(&self) -> HeapId {
+ unimplemented!()
+ }
+
+ fn heap_bound(&self) -> u32 {
+ unimplemented!()
+ }
+
+ fn set_heap_bound(&mut self, _: u32) {
+ unimplemented!()
+ }
+
+ fn aux_heap(&self) -> HeapId {
+ unimplemented!()
+ }
+
+ fn aux_heap_bound(&self) -> u32 {
+ unimplemented!()
+ }
+
+ fn set_aux_heap_bound(&mut self, _: u32) {
+ unimplemented!()
+ }
+
+ fn read_contract_code(&self, _: u16) -> U256 {
+ unimplemented!()
+ }
+}
+
use crate::StateInterface;
+
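+// Invokes the supplied macro once for every simple (non-parameterized) opcode type.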
+macro_rules! forall_simple_opcodes {
+ ($m:ident) => {
+ $m!(Nop);
+ $m!(Add);
+ $m!(Sub);
+ $m!(And);
+ $m!(Or);
+ $m!(Xor);
+ $m!(ShiftLeft);
+ $m!(ShiftRight);
+ $m!(RotateLeft);
+ $m!(RotateRight);
+ $m!(Mul);
+ $m!(Div);
+ $m!(NearCall);
+ $m!(Jump);
+ $m!(Event);
+ $m!(L2ToL1Message);
+ $m!(Decommit);
+ $m!(This);
+ $m!(Caller);
+ $m!(CodeAddress);
+ $m!(ErgsLeft);
+ $m!(SP);
+ $m!(ContextMeta);
+ $m!(ContextU128);
+ $m!(SetContextU128);
+ $m!(IncrementTxNumber);
+ $m!(AuxMutating0);
+ $m!(PrecompileCall);
+ $m!(HeapRead);
+ $m!(HeapWrite);
+ $m!(AuxHeapRead);
+ $m!(AuxHeapWrite);
+ $m!(PointerRead);
+ $m!(PointerAdd);
+ $m!(PointerSub);
+ $m!(PointerPack);
+ $m!(PointerShrink);
+ $m!(StorageRead);
+ $m!(StorageWrite);
+ $m!(TransientStorageRead);
+ $m!(TransientStorageWrite);
+ };
+}
+
+macro_rules! pub_struct {
+ ($x:ident) => {
+ #[doc = concat!("`", stringify!($x), "` opcode.")]
+ #[derive(Debug)]
+ pub struct $x;
+ };
+}
+
+/// EraVM opcodes.
+pub mod opcodes {
+ use std::marker::PhantomData;
+
+ use super::{CallingMode, ReturnType};
+
+ forall_simple_opcodes!(pub_struct);
+
+ /// `FarCall` group of opcodes distinguished by the calling mode (normal, delegate, or mimic).
+ #[derive(Debug)]
+ pub struct FarCall<M: TypeLevelCallingMode>(PhantomData<M>);
+
+ /// `Ret` group of opcodes distinguished by the return type (normal, panic, or revert).
+ #[derive(Debug)]
+ pub struct Ret<T: TypeLevelReturnType>(PhantomData<T>);
+
+ /// Normal [`Ret`]urn mode / [`FarCall`] mode.
+ #[derive(Debug)]
+ pub struct Normal;
+
+ /// Delegate [`FarCall`] mode.
+ #[derive(Debug)]
+ pub struct Delegate;
+
+ /// Mimic [`FarCall`] mode.
+ #[derive(Debug)]
+ pub struct Mimic;
+
+ /// Revert [`Ret`]urn mode.
+ #[derive(Debug)]
+ pub struct Revert;
+
+ /// Panic [`Ret`]urn mode.
+ #[derive(Debug)]
+ pub struct Panic;
+
+ /// Calling mode for the [`FarCall`] opcodes.
+ pub trait TypeLevelCallingMode {
+ /// Constant corresponding to this mode, making it easy to `match` on.
+ const VALUE: CallingMode;
+ }
+
+ impl TypeLevelCallingMode for Normal {
+ const VALUE: CallingMode = CallingMode::Normal;
+ }
+
+ impl TypeLevelCallingMode for Delegate {
+ const VALUE: CallingMode = CallingMode::Delegate;
+ }
+
+ impl TypeLevelCallingMode for Mimic {
+ const VALUE: CallingMode = CallingMode::Mimic;
+ }
+
+ /// Return type for the [`Ret`] opcodes.
+ pub trait TypeLevelReturnType {
+ /// Constant corresponding to this return type, making it easy to `match` on.
+ const VALUE: ReturnType;
+ }
+
+ impl TypeLevelReturnType for Normal {
+ const VALUE: ReturnType = ReturnType::Normal;
+ }
+
+ impl TypeLevelReturnType for Revert {
+ const VALUE: ReturnType = ReturnType::Revert;
+ }
+
+ impl TypeLevelReturnType for Panic {
+ const VALUE: ReturnType = ReturnType::Panic;
+ }
+}
+
+/// All supported EraVM opcodes in a single enumeration.
+#[allow(missing_docs)]
+#[derive(PartialEq, Eq, Debug, Copy, Clone, Hash)]
+pub enum Opcode {
+ Nop,
+ Add,
+ Sub,
+ And,
+ Or,
+ Xor,
+ ShiftLeft,
+ ShiftRight,
+ RotateLeft,
+ RotateRight,
+ Mul,
+ Div,
+ NearCall,
+ FarCall(CallingMode),
+ Ret(ReturnType),
+ Jump,
+ Event,
+ L2ToL1Message,
+ Decommit,
+ This,
+ Caller,
+ CodeAddress,
+ ErgsLeft,
+ SP,
+ ContextMeta,
+ ContextU128,
+ SetContextU128,
+ IncrementTxNumber,
+ AuxMutating0,
+ PrecompileCall,
+ HeapRead,
+ HeapWrite,
+ AuxHeapRead,
+ AuxHeapWrite,
+ PointerRead,
+ PointerAdd,
+ PointerSub,
+ PointerPack,
+ PointerShrink,
+ StorageRead,
+ StorageWrite,
+ TransientStorageRead,
+ TransientStorageWrite,
+}
+
+/// All supported calling modes for [`FarCall`](opcodes::FarCall) opcode.
+#[derive(PartialEq, Eq, Debug, Copy, Clone, Hash)]
+pub enum CallingMode {
+ /// Normal calling mode.
+ Normal,
+ /// Delegate calling mode (similar to `delegatecall` in EVM).
+ Delegate,
+ /// Mimic calling mode (can only be used by system contracts; allows emulating `eth_call` semantics while retaining the bootloader).
+ Mimic,
+}
+
+/// All supported return types for the [`Ret`](opcodes::Ret) opcode.
+#[derive(PartialEq, Eq, Debug, Copy, Clone, Hash)]
+pub enum ReturnType {
+ /// Normal return.
+ Normal,
+ /// Revert (e.g., a result of a Solidity `revert`).
+ Revert,
+ /// Panic, i.e. a non-revert abnormal control flow termination (e.g., out of gas).
+ Panic,
+}
+
+impl ReturnType {
+ /// Checks if this return type is a failure, i.e. not [normal](Self::Normal).
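+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # use zksync_vm2_interface::ReturnType;
+ /// assert!(ReturnType::Panic.is_failure());
+ /// assert!(!ReturnType::Normal.is_failure());
+ /// ```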
+ pub fn is_failure(&self) -> bool {
+ *self != ReturnType::Normal
+ }
+}
+
+/// Trait mapping opcodes as types to the corresponding variants of the [`Opcode`] enum.
+pub trait OpcodeType {
+ /// `Opcode` variant corresponding to this opcode type.
+ const VALUE: Opcode;
+}
+
+macro_rules! impl_opcode {
+ ($x:ident) => {
+ impl OpcodeType for opcodes::$x {
+ const VALUE: Opcode = Opcode::$x;
+ }
+ };
+}
+
+forall_simple_opcodes!(impl_opcode);
+
+impl<M: opcodes::TypeLevelCallingMode> OpcodeType for opcodes::FarCall<M> {
+ const VALUE: Opcode = Opcode::FarCall(M::VALUE);
+}
+
+impl<T: opcodes::TypeLevelReturnType> OpcodeType for opcodes::Ret<T> {
+ const VALUE: Opcode = Opcode::Ret(T::VALUE);
+}
+
+/// EraVM instruction tracer.
+///
+/// [`Self::before_instruction()`] is called just before the actual instruction is executed.
+/// If the instruction is skipped, `before_instruction` will be called with [`Nop`](opcodes::Nop).
+/// [`Self::after_instruction()`] is called once the instruction is executed and the program
+/// counter has advanced.
+///
+/// # Examples
+///
+/// Here `FarCallCounter` counts the number of far calls.
+///
+/// ```
+/// # use zksync_vm2_interface::{Tracer, StateInterface, OpcodeType, Opcode};
+/// struct FarCallCounter(usize);
+///
+/// impl Tracer for FarCallCounter {
+/// fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, state: &mut S) {
+/// match OP::VALUE {
+/// Opcode::FarCall(_) => self.0 += 1,
+/// _ => {}
+/// }
+/// }
+/// }
+/// ```
+pub trait Tracer {
+ /// Executes logic before an instruction handler.
+ ///
+ /// The default implementation does nothing.
+ fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) {}
+ /// Executes logic after an instruction handler.
+ ///
+ /// The default implementation does nothing.
+ fn after_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) {}
+
+ /// Provides cycle statistics for "complex" instructions from the prover perspective (mostly precompile calls).
+ ///
+ /// The default implementation does nothing.
+ fn on_extra_prover_cycles(&mut self, _stats: CycleStats) {}
+}
+
+/// Cycle statistics emitted by the VM and supplied to [`Tracer::on_extra_prover_cycles()`].
+#[derive(Debug, Clone, Copy)]
+pub enum CycleStats {
+ /// Call to the `keccak256` precompile with the specified number of hash cycles.
+ Keccak256(u32),
+ /// Call to the `sha256` precompile with the specified number of hash cycles.
+ Sha256(u32),
+ /// Call to the `ecrecover` precompile with the specified number of hash cycles.
+ EcRecover(u32),
+ /// Call to the `secp256r1_verify` precompile with the specified number of hash cycles.
+ Secp256r1Verify(u32),
+ /// Decommitting an opcode.
+ Decommit(u32),
+ /// Reading a slot from the VM storage.
+ StorageRead,
+ /// Writing a slot to the VM storage.
+ StorageWrite,
+}
+
+/// No-op tracer implementation.
+impl Tracer for () {}
+
+// Multiple tracers can be combined by building a linked list out of tuples.
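+// For example, `(a, (b, c))` runs tracers `a`, `b`, and `c` on every hook.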
+impl<A: Tracer, B: Tracer> Tracer for (A, B) {
+ fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, state: &mut S) {
+ self.0.before_instruction::<OP, S>(state);
+ self.1.before_instruction::<OP, S>(state);
+ }
+
+ fn after_instruction<OP: OpcodeType, S: StateInterface>(&mut self, state: &mut S) {
+ self.0.after_instruction::<OP, S>(state);
+ self.1.after_instruction::<OP, S>(state);
+ }
+
+ fn on_extra_prover_cycles(&mut self, stats: CycleStats) {
+ self.0.on_extra_prover_cycles(stats);
+ self.1.on_extra_prover_cycles(stats);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{CallingMode, OpcodeType};
+ use crate::{opcodes, DummyState, Tracer};
+
+ struct FarCallCounter(usize);
+
+ impl Tracer for FarCallCounter {
+ fn before_instruction<OP: OpcodeType, S: crate::StateInterface>(&mut self, _: &mut S) {
+ if let super::Opcode::FarCall(CallingMode::Normal) = OP::VALUE {
+ self.0 += 1;
+ }
+ }
+ }
+
+ #[test]
+ fn test_tracer() {
+ let mut tracer = FarCallCounter(0);
+
+ tracer.before_instruction::<opcodes::Nop, _>(&mut DummyState);
+ assert_eq!(tracer.0, 0);
+
+ tracer.before_instruction::<opcodes::FarCall<opcodes::Normal>, _>(&mut DummyState);
+ assert_eq!(tracer.0, 1);
+
+ tracer.before_instruction::<opcodes::FarCall<opcodes::Mimic>, _>(&mut DummyState);
+ assert_eq!(tracer.0, 1);
+ }
+
+ #[test]
+ fn test_aggregate_tracer() {
+ let mut tracer = (FarCallCounter(0), (FarCallCounter(0), FarCallCounter(0)));
+
+ tracer.before_instruction::<opcodes::Nop, _>(&mut DummyState);
+ assert_eq!(tracer.0 .0, 0);
+ assert_eq!(tracer.1 .0 .0, 0);
+ assert_eq!(tracer.1 .1 .0, 0);
+
+ tracer.before_instruction::<opcodes::FarCall<opcodes::Normal>, _>(&mut DummyState);
+ assert_eq!(tracer.0 .0, 1);
+ assert_eq!(tracer.1 .0 .0, 1);
+ assert_eq!(tracer.1 .1 .0, 1);
+ }
+}
+
pub enum AnyDestination {
+ Register1(Register1),
+ AbsoluteStack(AbsoluteStack),
+ RelativeStack(RelativeStack),
+ AdvanceStackPointer(AdvanceStackPointer),
+}
All supported addressing modes for the first destination argument.
+Register mode.
+Absolute stack addressing.
+Relative stack addressing.
+Relative stack addressing that updates the stack pointer on access.
pub enum AnySource {
+ Register1(Register1),
+ Immediate1(Immediate1),
+ AbsoluteStack(AbsoluteStack),
+ RelativeStack(RelativeStack),
+ AdvanceStackPointer(AdvanceStackPointer),
+ CodePage(CodePage),
+}
All supported addressing modes for the first source argument.
+Register mode.
+Immediate mode.
+Absolute stack addressing.
+Relative stack addressing.
+Relative stack addressing that updates the stack pointer on access.
+Addressing into the code page of the executing contract.
pub enum RegisterOrImmediate {
+ Register1(Register1),
+ Immediate1(Immediate1),
+}
Register or immediate addressing modes required by some VM instructions.
Addressing modes supported by EraVM.
pub struct AbsoluteStack(pub RegisterAndImmediate);
Absolute addressing into the stack.
pub struct AdvanceStackPointer(pub RegisterAndImmediate);
Same as RelativeStack
, but moves the stack pointer on access (decreases it when reading data;
+increases when writing data).
pub struct Arguments { /* private fields */ }
Arguments provided to an instruction in EraVM bytecode.
+pub struct CodePage(pub RegisterAndImmediate);
Absolute addressing into the code page of the currently executing program.
pub struct Immediate1(pub u16);
Immediate value passed as a first instruction arg.
pub struct Immediate2(pub u16);
Immediate value passed as a second instruction arg.
pub struct NotRegisterOrImmediate;
Error converting AnySource
to RegisterOrImmediate
.
pub struct Register(/* private fields */);
Representation of one of 16 VM registers.
pub struct Register1(pub Register);
Register passed as a first instruction argument.
+It must not be used simultaneously with AbsoluteStack
, RelativeStack
, AdvanceStackPointer
,
+or CodePage
.
pub struct Register2(pub Register);
Register passed as a second instruction argument.
pub struct RegisterAndImmediate {
+ pub immediate: u16,
+ pub register: Register,
+}
Combination of a register and an immediate value wrapped by AbsoluteStack
, RelativeStack
,
+AdvanceStackPointer
and CodePage
addressing modes.
immediate: u16
Immediate value.
+register: Register
Register spec.
pub struct RelativeStack(pub RegisterAndImmediate);
Relative addressing into the stack (relative to the VM stack pointer).
pub enum ExecutionEnd {
+ ProgramFinished(Vec<u8>),
+ Reverted(Vec<u8>),
+ Panicked,
+ SuspendedOnHook(u32),
+}
VM stop reason returned from VirtualMachine::run()
.
The executed program has finished and returned the specified data.
+The executed program has reverted returning the specified data.
+The executed program has panicked.
+Returned when the bootloader writes to the heap location specified by hook_address
.
#[repr(u8)]
pub enum Predicate {
+ Always = 8,
+ IfGT = 4,
+ IfEQ = 2,
+ IfLT = 1,
+ IfGE = 6,
+ IfLE = 3,
+ IfNotEQ = 40,
+ IfGTOrLT = 5,
+}
Predicate for an instruction. Encoded so that comparing it to flags is efficient.
+Always execute the associated instruction.
+Execute the associated instruction if the “greater than” execution flag is set.
+Execute the associated instruction if the “equal” execution flag is set.
+Execute the associated instruction if the “less than” execution flag is set.
+Execute the associated instruction if either of “greater than” or “equal” execution flags are set.
+Execute the associated instruction if either of “less than” or “equal” execution flags are set.
+Execute the associated instruction if the “equal” execution flag is not set.
+Execute the associated instruction if either of “less than” or “greater than” execution flags are set.
diff --git a/zksync_vm2/index.html b/zksync_vm2/index.html new file mode 100644 index 0000000..6c4619c --- /dev/null +++ b/zksync_vm2/index.html @@ -0,0 +1,6 @@ +This crate provides high-performance VirtualMachine
for ZKsync Era.
pub use zksync_vm2_interface as interface;
diff --git a/zksync_vm2/struct.FatPointer.html b/zksync_vm2/struct.FatPointer.html new file mode 100644 index 0000000..40e81e8 --- /dev/null +++ b/zksync_vm2/struct.FatPointer.html @@ -0,0 +1,23 @@
+#[repr(C)]
+pub struct FatPointer {
+ pub offset: u32,
+ pub memory_page: HeapId,
+ pub start: u32,
+ pub length: u32,
+}
Fat pointer to a heap location.
+offset: u32
Additional pointer offset inside the start..(start + length)
range.
memory_page: HeapId
ID of the heap this points to.
+start: u32
0-based index of the pointer start byte at the memory
page.
length: u32
Length of the pointed slice in bytes.
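A short sketch of how the fields relate (the concrete values here are hypothetical):
let ptr = FatPointer { offset: 8, memory_page: HeapId::FIRST, start: 64, length: 32 };
// The byte the pointer currently addresses lies at `start + offset` inside the heap.
let current = ptr.start + ptr.offset;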
+pub struct Instruction<T, W> { /* private fields */ }
Instructions for binary operations.
+Creates Add
instruction with the provided params.
Creates Sub
instruction with the provided params.
Creates And
instruction with the provided params.
Creates Or
instruction with the provided params.
Creates Xor
instruction with the provided params.
Creates ShiftLeft
instruction with the provided params.
Creates ShiftRight
instruction with the provided params.
Creates RotateLeft
instruction with the provided params.
Creates RotateRight
instruction with the provided params.
Context-related instructions.
+Creates a This
instruction with the provided params.
Creates a Caller
instruction with the provided params.
Creates a CodeAddress
instruction with the provided params.
Creates an ErgsLeft
instruction with the provided params.
Creates a ContextU128
instruction with the provided params.
Creates an SP
instruction with the provided params.
Creates a ContextMeta
instruction with the provided params.
Creates a SetContextU128
instruction with the provided params.
Creates an IncrementTxNumber
instruction with the provided params.
Creates an AuxMutating0
instruction with the provided params.
Creates an Event
instruction with the provided params.
Creates an L2ToL1Message
instruction with the provided params.
Creates a FarCall
instruction with the provided mode and params.
Creates a HeapRead
instruction with the provided params.
Creates an AuxHeapRead
instruction with the provided params.
Creates a HeapWrite
instruction with the provided params.
Creates an AuxHeapWrite
instruction with the provided params.
Creates a PointerRead
instruction with the provided params.
Creates a NearCall
instruction with the provided params.
Creates a Nop
instruction with the provided params.
Pointer-related instructions.
+Creates a PointerAdd
instruction with the provided params.
Creates a PointerSub
instruction with the provided params.
Creates a PointerPack
instruction with the provided params.
Creates a PointerShrink
instruction with the provided params.
Creates a PrecompileCall
instruction with the provided params.
Variations of Ret
instructions.
Creates a normal Ret
instruction with the provided params.
Creates a revert Ret
instruction with the provided params.
Creates a panic Ret
instruction with the provided params.
Creates an invalid instruction that will panic by draining all gas.
+Creates a StorageWrite
instruction with the provided params.
Creates a TransientStorageWrite
instruction with the provided params.
Creates a StorageRead
instruction with the provided params.
Creates a TransientStorageRead
instruction with the provided params.
pub struct ModeRequirements(/* private fields */);
VM execution mode requirements (kernel only, not in static call) that can be placed on instructions.
pub struct Program<T, W> { /* private fields */ }
Compiled EraVM bytecode.
+Cloning this is cheap. It is a handle to memory similar to Arc
.
pub struct Settings {
+ pub default_aa_code_hash: [u8; 32],
+ pub evm_interpreter_code_hash: [u8; 32],
+ pub hook_address: u32,
+}
VirtualMachine
settings.
default_aa_code_hash: [u8; 32]
Bytecode hash of the default account abstraction contract.
+evm_interpreter_code_hash: [u8; 32]
Bytecode hash of the EVM interpreter.
+hook_address: u32
Writing to this address in the bootloader’s heap suspends execution.
pub struct Snapshot { /* private fields */ }
Opaque snapshot of a WorldDiff
output by its eponymous method.
+Can be provided to WorldDiff::events_after()
etc. to get data after the snapshot was created.
pub struct StorageChange {
+ pub before: Option<U256>,
+ pub after: U256,
+ pub is_initial: bool,
+}
Change in a single storage slot.
+before: Option<U256>
Value before the slot was written to. None
if the slot was not written to previously.
after: U256
Value written to the slot.
+is_initial: bool
true
if the slot is not set in the World
.
+A write may be initial even if it isn’t the first write to a slot!
pub struct VirtualMachine<T, W> { /* private fields */ }
High-performance out-of-circuit EraVM implementation.
+Creates a new VM instance.
+Provides a reference to the World
diff accumulated by VM execution so far.
Returns how much of the extra gas limit is left and the stop reason, +unless the extra gas limit was exceeded.
+Needed to support account validation gas limit. +We cannot simply reduce the available gas, as contracts might behave differently +depending on remaining gas.
+Creates a VM snapshot. The snapshot can then be rolled back to, or discarded.
+Returns the VM to the state it was in when Self::make_snapshot()
was called.
Pops a previously made snapshot without rolling back to it. This effectively commits +all changes made up to this point, so that they cannot be rolled back.
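A sketch of this snapshot lifecycle; apart from make_snapshot() and ExecutionEnd, the names and signatures below are assumptions rather than documented API:
vm.make_snapshot();
let end = vm.run(&mut world, &mut tracer);
if matches!(end, ExecutionEnd::Panicked) {
    vm.rollback();      // return to the state captured by make_snapshot()
} else {
    vm.pop_snapshot();  // commit; the snapshot can no longer be rolled back to
}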
pub struct WorldDiff { /* private fields */ }
Pending modifications to the global state that are executed at the end of a block. +In other words, side effects.
+Returns recorded refunds for all storage operations.
+Returns recorded pubdata costs for all storage operations.
+Gets changes for all touched storage slots.
+Gets changes for storage slots touched after the specified snapshot
was created.
Returns events emitted after the specified snapshot
was created.
Returns L2-to-L1 logs emitted after the specified snapshot
was created.
Returns hashes of decommitted contract bytecodes in no particular order. Note that this includes +failed (out-of-gas) decommitments.
+Get a snapshot for selecting which logs & co. to output using Self::events_after()
and other methods.
pub struct TestWorld<T> { /* private fields */ }
Test World
implementation.
pub trait StorageInterface {
+ // Required methods
+ fn read_storage(&mut self, contract: H160, key: U256) -> Option<U256>;
+ fn cost_of_writing_storage(
+ &mut self,
+ initial_value: Option<U256>,
+ new_value: U256,
+ ) -> u32;
+ fn is_free_storage_slot(&self, contract: &H160, key: &U256) -> bool;
+}
VM storage access operations.
+Reads the specified slot from the storage.
+There is no write counterpart; WorldDiff::get_storage_changes()
gives a list of all storage changes.
Computes the cost of writing a storage slot.
+Returns whether the storage slot is free both in terms of gas and pubdata.
+pub trait World<T: Tracer>: StorageInterface + Sized {
+ // Required methods
+ fn decommit(&mut self, hash: U256) -> Program<T, Self>;
+ fn decommit_code(&mut self, hash: U256) -> Vec<u8>;
+}
Encapsulates VM interaction with the external world. This includes VM storage and decommitting (loading) bytecodes +for execution.
+Loads a bytecode with the specified hash.
+This method will be called every time a contract is called. Caching and decoding are +the world implementor’s job.
+Loads bytecode bytes for the decommit
opcode.
pub enum CallingMode {
+ Normal,
+ Delegate,
+ Mimic,
+}
All supported calling modes for FarCall
opcode.
Normal calling mode.
+Delegate calling mode (similar to delegatecall
in EVM).
Mimic calling mode (can only be used by system contracts; allows emulating eth_call
semantics while retaining the bootloader).
pub enum CycleStats {
+ Keccak256(u32),
+ Sha256(u32),
+ EcRecover(u32),
+ Secp256r1Verify(u32),
+ Decommit(u32),
+ StorageRead,
+ StorageWrite,
+}
Cycle statistics emitted by the VM and supplied to Tracer::on_extra_prover_cycles()
.
Call to the keccak256
precompile with the specified number of hash cycles.
Call to the sha256
precompile with the specified number of hash cycles.
Call to the ecrecover
precompile with the specified number of hash cycles.
Call to the secp256r1_verify
precompile with the specified number of hash cycles.
Decommitting an opcode.
+Reading a slot from the VM storage.
+Writing a slot to the VM storage.
pub enum Opcode {
Nop,
+ Add,
+ Sub,
+ And,
+ Or,
+ Xor,
+ ShiftLeft,
+ ShiftRight,
+ RotateLeft,
+ RotateRight,
+ Mul,
+ Div,
+ NearCall,
+ FarCall(CallingMode),
+ Ret(ReturnType),
+ Jump,
+ Event,
+ L2ToL1Message,
+ Decommit,
+ This,
+ Caller,
+ CodeAddress,
+ ErgsLeft,
+ SP,
+ ContextMeta,
+ ContextU128,
+ SetContextU128,
+ IncrementTxNumber,
+ AuxMutating0,
+ PrecompileCall,
+ HeapRead,
+ HeapWrite,
+ AuxHeapRead,
+ AuxHeapWrite,
+ PointerRead,
+ PointerAdd,
+ PointerSub,
+ PointerPack,
+ PointerShrink,
+ StorageRead,
+ StorageWrite,
+ TransientStorageRead,
+ TransientStorageWrite,
+}
All supported EraVM opcodes in a single enumeration.
pub enum ReturnType {
+ Normal,
+ Revert,
+ Panic,
+}
All supported return types for the Ret
opcode.
Normal return.
+Revert (e.g., a result of a Solidity revert
).
Panic, i.e. a non-revert abnormal control flow termination (e.g., out of gas).
+Checks if this return type is a failure, i.e. not normal.
Add
opcode.
pub struct And;
And
opcode.
pub struct AuxHeapRead;
AuxHeapRead
opcode.
pub struct AuxHeapWrite;
AuxHeapWrite
opcode.
pub struct AuxMutating0;
AuxMutating0
opcode.
pub struct Caller;
Caller
opcode.
pub struct CodeAddress;
CodeAddress
opcode.
pub struct ContextMeta;
ContextMeta
opcode.
pub struct ContextU128;
ContextU128
opcode.
pub struct Decommit;
Decommit
opcode.
pub struct Delegate;
Delegate FarCall
mode.
match
it.pub struct Div;
Div
opcode.
pub struct ErgsLeft;
ErgsLeft
opcode.
pub struct Event;
Event
opcode.
pub struct FarCall<M: TypeLevelCallingMode>(/* private fields */);
FarCall
group of opcodes distinguished by the calling mode (normal, delegate, or mimic).
pub struct HeapRead;
HeapRead
opcode.
pub struct HeapWrite;
HeapWrite
opcode.
pub struct IncrementTxNumber;
IncrementTxNumber
opcode.
pub struct Jump;
Jump
opcode.
pub struct L2ToL1Message;
L2ToL1Message
opcode.
pub struct Mimic;
Mimic FarCall
mode.
match
it.pub struct Mul;
Mul
opcode.
pub struct NearCall;
NearCall
opcode.
pub struct Nop;
Nop
opcode.
pub struct Normal;
match
it.match
it.pub struct Or;
Or
opcode.
pub struct Panic;
Panic Ret
urn mode.
match
it.pub struct PointerAdd;
PointerAdd
opcode.
pub struct PointerPack;
PointerPack
opcode.
pub struct PointerRead;
PointerRead
opcode.
pub struct PointerShrink;
PointerShrink
opcode.
pub struct PointerSub;
PointerSub
opcode.
pub struct PrecompileCall;
PrecompileCall
opcode.
pub struct Ret<T: TypeLevelReturnType>(/* private fields */);
Ret
group of opcodes distinguished by the return type (normal, panic, or revert).
pub struct Revert;
Revert Ret
urn mode.
match
it.pub struct RotateLeft;
RotateLeft
opcode.
pub struct RotateRight;
RotateRight
opcode.
pub struct SP;
SP
opcode.
pub struct SetContextU128;
SetContextU128
opcode.
pub struct ShiftLeft;
ShiftLeft
opcode.
pub struct ShiftRight;
ShiftRight
opcode.
pub struct StorageRead;
StorageRead
opcode.
pub struct StorageWrite;
StorageWrite
opcode.
pub struct Sub;
Sub
opcode.
pub struct This;
This
opcode.
pub struct TransientStorageRead;
TransientStorageRead
opcode.
pub struct TransientStorageWrite;
TransientStorageWrite
opcode.
pub struct Xor;
Xor
opcode.
pub trait TypeLevelCallingMode {
    const VALUE: CallingMode;
}
Calling mode for the FarCall opcodes.
VALUE: Constant corresponding to this mode, allowing it to be easily matched on.

pub trait TypeLevelReturnType {
    const VALUE: ReturnType;
}
Return type for the Ret opcodes.
VALUE: Constant corresponding to this return type, allowing it to be easily matched on.
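As a sketch of how these type-level markers can be used, assuming the CallingMode enum has Normal, Delegate, and Mimic variants corresponding to the marker structs above (calling_mode_label is a hypothetical helper):

use zksync_vm2_interface::{opcodes::TypeLevelCallingMode, CallingMode};

// Recovers the calling mode of a FarCall<M> opcode from its type parameter
// and returns a human-readable label for it.
fn calling_mode_label<M: TypeLevelCallingMode>() -> &'static str {
    match M::VALUE {
        CallingMode::Normal => "normal far call",
        CallingMode::Delegate => "delegate call",
        CallingMode::Mimic => "mimic call",
    }
}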
pub struct Event {
+ pub key: U256,
+ pub value: U256,
+ pub is_first: bool,
+ pub shard_id: u8,
+ pub tx_number: u16,
+}
Event emitted by EraVM.
There is no address field because nobody is interested in events that don’t come from the event writer, so we simply do not record events coming from anywhere else.
+key: U256
Event key.
+value: U256
Event value.
+is_first: bool
Is this event first in a chain of events?
+shard_id: u8
Shard identifier (currently, always set to 0).
+tx_number: u16
0-based index of a transaction that has emitted this event.
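For instance, a tracer could use this field to collect the events of the transaction currently being executed. A minimal sketch, assuming the StateInterface trait documented below (current_tx_events is a hypothetical helper):

use zksync_vm2_interface::{Event, StateInterface};

// Collects the events emitted so far by the current transaction.
fn current_tx_events<S: StateInterface>(state: &S) -> Vec<Event> {
    let tx = state.transaction_number();
    state.events().filter(|event| event.tx_number == tx).collect()
}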
pub struct Flags {
+ pub less_than: bool,
+ pub equal: bool,
+ pub greater: bool,
+}
VM execution flags. See the EraVM reference for more details.
+less_than: bool
“Less than” flag.
+equal: bool
“Equal” flag.
+greater: bool
“Greater than” flag.
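A minimal sketch of reading these flags through the StateInterface trait documented below, e.g. to check whether a “greater than or equal” predicate would currently fire (greater_or_equal is a hypothetical helper):

use zksync_vm2_interface::StateInterface;

// The "greater than or equal" predicate fires if either flag is set.
fn greater_or_equal<S: StateInterface>(state: &S) -> bool {
    let flags = state.flags();
    flags.greater || flags.equal
}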
pub struct HeapId(/* private fields */);
Identifier of a VM heap.
EraVM docs sometimes refer to heaps as heap pages; the docs in this crate don’t, to avoid confusion with the internal heap structure.
+Identifier of the calldata heap used by the first executed program (i.e., the bootloader).
+Identifier of the heap used by the first executed program (i.e., the bootloader).
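A heap identifier is normally obtained from a call frame rather than constructed directly. A minimal sketch of reading the first word of the current frame’s main heap, assuming U256 is the primitive_types type used throughout the interface (first_heap_word is a hypothetical helper):

use primitive_types::U256;
use zksync_vm2_interface::StateInterface;

// Reads the first 32-byte word of the current frame's main heap.
fn first_heap_word<S: StateInterface>(state: &mut S) -> U256 {
    // The frame handle is dropped at the end of this statement,
    // releasing the mutable borrow before the heap read.
    let heap = state.current_frame().heap();
    state.read_heap_u256(heap, 0)
}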
pub struct L2ToL1Log {
+ pub key: U256,
+ pub value: U256,
+ pub is_service: bool,
+ pub address: H160,
+ pub shard_id: u8,
+ pub tx_number: u16,
+}
L2-to-L1 log emitted by EraVM.
+key: U256
Log key.
+value: U256
Log value.
+is_service: bool
Is this a service log?
+address: H160
Address of the contract that has emitted this log.
+shard_id: u8
Shard identifier (currently, always set to 0).
+tx_number: u16
0-based index of the transaction that has emitted this log.
pub trait CallframeInterface {
// Required methods
+ fn address(&self) -> H160;
+ fn set_address(&mut self, address: H160);
+ fn code_address(&self) -> H160;
+ fn set_code_address(&mut self, address: H160);
+ fn caller(&self) -> H160;
+ fn set_caller(&mut self, address: H160);
+ fn program_counter(&self) -> Option<u16>;
+ fn set_program_counter(&mut self, value: u16);
+ fn exception_handler(&self) -> u16;
+ fn set_exception_handler(&mut self, value: u16);
+ fn is_static(&self) -> bool;
+ fn is_kernel(&self) -> bool;
+ fn gas(&self) -> u32;
+ fn set_gas(&mut self, new_gas: u32);
+ fn stipend(&self) -> u32;
+ fn context_u128(&self) -> u128;
+ fn set_context_u128(&mut self, value: u128);
+ fn is_near_call(&self) -> bool;
+ fn read_stack(&self, index: u16) -> (U256, bool);
+ fn write_stack(&mut self, index: u16, value: U256, is_pointer: bool);
+ fn stack_pointer(&self) -> u16;
+ fn set_stack_pointer(&mut self, value: u16);
+ fn heap(&self) -> HeapId;
+ fn heap_bound(&self) -> u32;
+ fn set_heap_bound(&mut self, value: u32);
+ fn aux_heap(&self) -> HeapId;
+ fn aux_heap_bound(&self) -> u32;
+ fn set_aux_heap_bound(&mut self, value: u32);
+ fn read_contract_code(&self, slot: u16) -> U256;
+}
Public interface of an EraVM call frame.
Address of the storage context associated with this frame. For delegate calls, this address is inherited from the calling contract; otherwise, it’s the same as Self::code_address().
Sets the address of the executing contract.
+Address of the contract being executed.
Sets the address of the contract being executed. This does not cause the contract at the specified address to be loaded per se; it just updates the value used internally by the VM (e.g., returned by the CodeAddress opcode).
Sets the address of the calling contract.
Returns the current program counter (i.e., the 0-based index of the instruction being executed). During a panic, this returns None.
Sets the program counter. The VM will execute an invalid instruction if you jump out of the program.
+Returns the program counter that the parent frame should continue from if this frame fails.
+Sets the exception handler as specified above.
Returns the context value for this call. This context is accessible via the ContextU128 opcode.
Sets the context value for this call.
+Checks whether this frame corresponds to a near call.
+Reads the specified stack slot. Returns a value together with a pointer flag.
+Sets the value and pointer flag for the specified stack slot.
+Returns the stack pointer.
+Sets the stack pointer.
+Returns the main heap boundary (number of paid bytes).
+Sets the main heap boundary.
+Returns the auxiliary heap boundary (number of paid bytes).
+Sets the auxiliary heap boundary.
+Reads a word from the bytecode of the executing contract.
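As an illustration, a tracer could snapshot a few frame properties before every far call. A sketch assuming the Opcode enum and Tracer trait documented below (CallSnapshots is a hypothetical tracer):

use primitive_types::H160;
use zksync_vm2_interface::{Opcode, OpcodeType, StateInterface, Tracer};

// Records (contract address, remaining gas) of the current frame
// before every far call, whatever its calling mode.
#[derive(Default)]
struct CallSnapshots(Vec<(H160, u32)>);

impl Tracer for CallSnapshots {
    fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, state: &mut S) {
        if matches!(OP::VALUE, Opcode::FarCall(_)) {
            let frame = state.current_frame();
            self.0.push((frame.address(), frame.gas()));
        }
    }
}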
+pub trait OpcodeType {
+ const VALUE: Opcode;
+}
Trait mapping opcodes as types to the corresponding variants of the Opcode enum.
pub trait StateInterface {
// Required methods
+ fn read_register(&self, register: u8) -> (U256, bool);
+ fn set_register(&mut self, register: u8, value: U256, is_pointer: bool);
+ fn current_frame(&mut self) -> impl CallframeInterface + '_;
+ fn number_of_callframes(&self) -> usize;
+ fn callframe(&mut self, n: usize) -> impl CallframeInterface + '_;
+ fn read_heap_byte(&self, heap: HeapId, offset: u32) -> u8;
+ fn read_heap_u256(&self, heap: HeapId, offset: u32) -> U256;
+ fn write_heap_u256(&mut self, heap: HeapId, offset: u32, value: U256);
+ fn flags(&self) -> Flags;
+ fn set_flags(&mut self, flags: Flags);
+ fn transaction_number(&self) -> u16;
+ fn set_transaction_number(&mut self, value: u16);
+ fn context_u128_register(&self) -> u128;
+ fn set_context_u128_register(&mut self, value: u128);
+ fn get_storage_state(&self) -> impl Iterator<Item = ((H160, U256), U256)>;
+ fn get_transient_storage_state(
+ &self,
+ ) -> impl Iterator<Item = ((H160, U256), U256)>;
+ fn get_transient_storage(&self, address: H160, slot: U256) -> U256;
+ fn write_transient_storage(
+ &mut self,
+ address: H160,
+ slot: U256,
+ value: U256,
+ );
+ fn events(&self) -> impl Iterator<Item = Event>;
+ fn l2_to_l1_logs(&self) -> impl Iterator<Item = L2ToL1Log>;
+ fn pubdata(&self) -> i32;
+ fn set_pubdata(&mut self, value: i32);
+}
Public interface of the VM state. Encompasses both read and write methods.
+Reads a register with the specified zero-based index. Returns a value together with a pointer flag.
Sets a register with the specified zero-based index.
+Returns a mutable handle to the current call frame.
+Returns the total number of call frames.
Returns a mutable handle to the call frame with the specified index, where zero is the current frame, one is the frame before that, etc.
+Reads a single byte from the specified heap at the specified 0-based offset.
Reads an entire U256 word in big-endian order from the specified heap at the given offset (which is the index of the most significant byte of the read value).
Writes an entire U256 word in big-endian order to the specified heap at the given offset (which is the index of the most significant byte of the written value).
Returns the currently set 0-based transaction number.
+Sets the current transaction number.
+Returns the value of the context register.
+Sets the value of the context register.
+Iterates over storage slots read or written during VM execution.
+Iterates over all transient storage slots set during VM execution.
+Gets value of the specified transient storage slot.
+Sets value of the specified transient storage slot.
+Iterates over events emitted during VM execution.
+Iterates over L2-to-L1 logs emitted during VM execution.
+Sets the current amount of published pubdata.
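Putting a few of these accessors together, a sketch of a debugging helper that summarizes the externally visible state (summarize is a hypothetical function):

use zksync_vm2_interface::StateInterface;

// Prints a summary of the externally visible VM state.
fn summarize<S: StateInterface>(state: &S) {
    println!("published pubdata: {}", state.pubdata());
    println!("events emitted: {}", state.events().count());
    println!("L2-to-L1 logs: {}", state.l2_to_l1_logs().count());
    for ((address, slot), value) in state.get_storage_state() {
        println!("storage {address:?}[{slot}] = {value}");
    }
}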
+pub trait Tracer {
+ // Provided methods
+ fn before_instruction<OP: OpcodeType, S: StateInterface>(
+ &mut self,
+ _state: &mut S,
+ ) { ... }
+ fn after_instruction<OP: OpcodeType, S: StateInterface>(
+ &mut self,
+ _state: &mut S,
+ ) { ... }
+ fn on_extra_prover_cycles(&mut self, _stats: CycleStats) { ... }
+}
EraVM instruction tracer.
Self::before_instruction() is called just before the actual instruction is executed. If the instruction is skipped, before_instruction will be called with Nop.
Self::after_instruction() is called once the instruction is executed and the program counter has advanced.
Here, FarCallCounter counts the number of far calls:
use zksync_vm2_interface::{Opcode, OpcodeType, StateInterface, Tracer};

// Counts the number of far calls, regardless of their calling mode.
struct FarCallCounter(usize);

impl Tracer for FarCallCounter {
    fn before_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) {
        match OP::VALUE {
            Opcode::FarCall(_) => self.0 += 1,
            _ => {}
        }
    }
}
Executes logic before an instruction handler.
+The default implementation does nothing.
+Executes logic after an instruction handler.
+The default implementation does nothing.
+Provides cycle statistics for “complex” instructions from the prover perspective (mostly precompile calls).
+The default implementation does nothing.
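For instance, a tracer could accumulate these statistics. A minimal sketch that merely counts how many times extra cycles are reported, without inspecting the CycleStats payload (CycleStatsCounter is a hypothetical tracer):

use zksync_vm2_interface::{CycleStats, Tracer};

// Counts how many times the VM reports extra prover cycles.
#[derive(Default)]
struct CycleStatsCounter(usize);

impl Tracer for CycleStatsCounter {
    fn on_extra_prover_cycles(&mut self, _stats: CycleStats) {
        self.0 += 1;
    }
}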
+No-op tracer implementation.