Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
execution_trace.cpp
Go to the documentation of this file.
2
3#include <algorithm>
4#include <array>
5#include <cstddef>
6#include <numeric>
7#include <ranges>
8#include <stdexcept>
9
37
42
43namespace bb::avm2::tracegen {
44namespace {
45
// Column lookup tables, indexed by operand / register position, so the
// trace-generation code below can address per-operand and per-register
// columns numerically instead of naming each column explicitly.

// Raw operand values as decoded from the instruction.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_COLUMNS = {
    C::execution_op_0_, C::execution_op_1_, C::execution_op_2_, C::execution_op_3_,
    C::execution_op_4_, C::execution_op_5_, C::execution_op_6_,
};
// Selector: operand i is a memory address (participates in address resolution).
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_ADDRESS_COLUMNS = {
    C::execution_sel_op_is_address_0_, C::execution_sel_op_is_address_1_, C::execution_sel_op_is_address_2_,
    C::execution_sel_op_is_address_3_, C::execution_sel_op_is_address_4_, C::execution_sel_op_is_address_5_,
    C::execution_sel_op_is_address_6_,
};
// Operand value after the relative-addressing adjustment has been applied.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_AFTER_RELATIVE_COLUMNS = {
    C::execution_op_after_relative_0_, C::execution_op_after_relative_1_, C::execution_op_after_relative_2_,
    C::execution_op_after_relative_3_, C::execution_op_after_relative_4_, C::execution_op_after_relative_5_,
    C::execution_op_after_relative_6_,
};
// Fully resolved operands ("rop") and their value tags.
constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_COLUMNS = {
    C::execution_rop_0_, C::execution_rop_1_, C::execution_rop_2_, C::execution_rop_3_,
    C::execution_rop_4_, C::execution_rop_5_, C::execution_rop_6_,
};
constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_TAG_COLUMNS = {
    C::execution_rop_tag_0_, C::execution_rop_tag_1_, C::execution_rop_tag_2_, C::execution_rop_tag_3_,
    C::execution_rop_tag_4_, C::execution_rop_tag_5_, C::execution_rop_tag_6_,
};
// Selector: indirection should be applied to operand i.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS = {
    C::execution_sel_should_apply_indirection_0_, C::execution_sel_should_apply_indirection_1_,
    C::execution_sel_should_apply_indirection_2_, C::execution_sel_should_apply_indirection_3_,
    C::execution_sel_should_apply_indirection_4_, C::execution_sel_should_apply_indirection_5_,
    C::execution_sel_should_apply_indirection_6_,
};
// Selector: the relative-address computation for operand i overflowed.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OVERFLOW_COLUMNS = {
    C::execution_sel_relative_overflow_0_, C::execution_sel_relative_overflow_1_, C::execution_sel_relative_overflow_2_,
    C::execution_sel_relative_overflow_3_, C::execution_sel_relative_overflow_4_, C::execution_sel_relative_overflow_5_,
    C::execution_sel_relative_overflow_6_,
};
// NOTE(review): named "is relative valid base" but maps to the
// `sel_op_do_overflow_check_*` columns — presumably "relative with a valid
// base" is exactly the condition under which the overflow check runs; confirm
// against the addressing PIL.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS = {
    C::execution_sel_op_do_overflow_check_0_, C::execution_sel_op_do_overflow_check_1_,
    C::execution_sel_op_do_overflow_check_2_, C::execution_sel_op_do_overflow_check_3_,
    C::execution_sel_op_do_overflow_check_4_, C::execution_sel_op_do_overflow_check_5_,
    C::execution_sel_op_do_overflow_check_6_,
};
// The wire format carries two bits per operand (relative + indirect) inside a
// 16-bit indirect field, hence TOTAL_INDIRECT_BITS / 2 wire columns per kind.
constexpr size_t TOTAL_INDIRECT_BITS = 16;
static_assert(static_cast<size_t>(AVM_MAX_OPERANDS) * 2 <= TOTAL_INDIRECT_BITS);
constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_RELATIVE_WIRE_COLUMNS = {
    C::execution_sel_op_is_relative_wire_0_, C::execution_sel_op_is_relative_wire_1_,
    C::execution_sel_op_is_relative_wire_2_, C::execution_sel_op_is_relative_wire_3_,
    C::execution_sel_op_is_relative_wire_4_, C::execution_sel_op_is_relative_wire_5_,
    C::execution_sel_op_is_relative_wire_6_, C::execution_sel_op_is_relative_wire_7_,
};
constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_INDIRECT_WIRE_COLUMNS = {
    C::execution_sel_op_is_indirect_wire_0_, C::execution_sel_op_is_indirect_wire_1_,
    C::execution_sel_op_is_indirect_wire_2_, C::execution_sel_op_is_indirect_wire_3_,
    C::execution_sel_op_is_indirect_wire_4_, C::execution_sel_op_is_indirect_wire_5_,
    C::execution_sel_op_is_indirect_wire_6_, C::execution_sel_op_is_indirect_wire_7_,
};

// Per-register column tables (register values, memory tags, read/write flags,
// memory-op selectors, expected tags, tag-check selectors, effective-op
// selectors), indexed by register position 0..AVM_MAX_REGISTERS-1.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_COLUMNS = {
    C::execution_register_0_, C::execution_register_1_, C::execution_register_2_,
    C::execution_register_3_, C::execution_register_4_, C::execution_register_5_,
};
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_TAG_COLUMNS = {
    C::execution_mem_tag_reg_0_, C::execution_mem_tag_reg_1_, C::execution_mem_tag_reg_2_,
    C::execution_mem_tag_reg_3_, C::execution_mem_tag_reg_4_, C::execution_mem_tag_reg_5_,
};
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_IS_WRITE_COLUMNS = {
    C::execution_rw_reg_0_, C::execution_rw_reg_1_, C::execution_rw_reg_2_,
    C::execution_rw_reg_3_, C::execution_rw_reg_4_, C::execution_rw_reg_5_,
};
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_OP_COLUMNS = {
    C::execution_sel_mem_op_reg_0_, C::execution_sel_mem_op_reg_1_, C::execution_sel_mem_op_reg_2_,
    C::execution_sel_mem_op_reg_3_, C::execution_sel_mem_op_reg_4_, C::execution_sel_mem_op_reg_5_,
};
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_EXPECTED_TAG_COLUMNS = {
    C::execution_expected_tag_reg_0_, C::execution_expected_tag_reg_1_, C::execution_expected_tag_reg_2_,
    C::execution_expected_tag_reg_3_, C::execution_expected_tag_reg_4_, C::execution_expected_tag_reg_5_,
};
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_TAG_CHECK_COLUMNS = {
    C::execution_sel_tag_check_reg_0_, C::execution_sel_tag_check_reg_1_, C::execution_sel_tag_check_reg_2_,
    C::execution_sel_tag_check_reg_3_, C::execution_sel_tag_check_reg_4_, C::execution_sel_tag_check_reg_5_,
};
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_OP_REG_EFFECTIVE_COLUMNS = {
    C::execution_sel_op_reg_effective_0_, C::execution_sel_op_reg_effective_1_, C::execution_sel_op_reg_effective_2_,
    C::execution_sel_op_reg_effective_3_, C::execution_sel_op_reg_effective_4_, C::execution_sel_op_reg_effective_5_,
};
129
// Maps an execution opcode to its dedicated `sel_execute_*` selector column.
// Throws std::runtime_error for opcodes not handled by the execution subtrace.
//
// NOTE(review): the `case ExecutionOpCode::...:` labels are missing from this
// rendering (the listing's line numbering skips one line before each
// `return`), so each `return` below is the body of one elided case. Restore
// the labels from the original source before compiling; as written, the
// returns are unreachable statements preceding `default:`.
C get_execution_opcode_selector(ExecutionOpCode exec_opcode)
{
    switch (exec_opcode) {
        return C::execution_sel_execute_get_env_var;
        return C::execution_sel_execute_mov;
        return C::execution_sel_execute_jump;
        return C::execution_sel_execute_jumpi;
        return C::execution_sel_execute_call;
        return C::execution_sel_execute_static_call;
        return C::execution_sel_execute_internal_call;
        return C::execution_sel_execute_internal_return;
        return C::execution_sel_execute_return;
        return C::execution_sel_execute_revert;
        return C::execution_sel_execute_success_copy;
        return C::execution_sel_execute_returndata_size;
        return C::execution_sel_execute_debug_log;
        return C::execution_sel_execute_sload;
        return C::execution_sel_execute_sstore;
        return C::execution_sel_execute_notehash_exists;
        return C::execution_sel_execute_emit_notehash;
        return C::execution_sel_execute_l1_to_l2_message_exists;
        return C::execution_sel_execute_nullifier_exists;
        return C::execution_sel_execute_emit_nullifier;
        return C::execution_sel_execute_send_l2_to_l1_msg;
    default:
        throw std::runtime_error("Execution opcode does not have a corresponding selector");
    }
}
186
// Aggregated failure information computed by preprocess_for_discard() and
// consumed when deciding the per-row `discard` flag / dying context id.
struct FailingContexts {
    // True iff the top-level app-logic enqueued call exited with a failure.
    bool app_logic_failure = false;
    // True iff the top-level teardown enqueued call exited with a failure.
    bool teardown_failure = false;
    // Ids of every context that fails at some point during execution.
    // NOTE(review): code below also reads `app_logic_exit_context_id` and
    // `teardown_exit_context_id` from this struct; those members appear to be
    // elided in this rendering — confirm against the original source.
    unordered_flat_set<uint32_t> does_context_fail;
};
197
// Runs two preprocessing passes over the execution events:
//  1. record, per top-level phase (app logic / teardown), whether the phase's
//     exiting event failed and which context id it exited from;
//  2. collect the id of every context that fails at any point.
// The result drives discard-flag computation in the main trace loop.
// NOTE(review): the parameter list is elided in this rendering; the function
// iterates a container named `ex_events` below — restore the signature from
// the original source.
FailingContexts preprocess_for_discard(
{
    FailingContexts dying_info;

    // We use `after_context_event` to retrieve parent_id, context_id, and phase to be consistent with
    // how these values are populated in the trace (see ExecutionTraceBuilder::process()). These values
    // should not change during the life-cycle of an execution event though and before_context_event
    // would lead to the same results.

    // Preprocessing pass 1: find the events that exit the app logic and teardown phases
    for (const auto& ex_event : ex_events) {
        bool is_exit = ex_event.is_exit();
        // parent_id == 0 marks a top-level (enqueued) call.
        bool is_top_level = ex_event.after_context_event.parent_id == 0;

        if (is_exit && is_top_level) {
            if (ex_event.after_context_event.phase == TransactionPhase::APP_LOGIC) {
                dying_info.app_logic_failure = ex_event.is_failure();
                dying_info.app_logic_exit_context_id = ex_event.after_context_event.id;
            } else if (ex_event.after_context_event.phase == TransactionPhase::TEARDOWN) {
                dying_info.teardown_failure = ex_event.is_failure();
                dying_info.teardown_exit_context_id = ex_event.after_context_event.id;
                break; // Teardown is the last phase we care about
            }
        }
    }

    // Preprocessing pass 2: find all contexts that fail and mark them
    for (const auto& ex_event : ex_events) {
        if (ex_event.is_failure()) {
            dying_info.does_context_fail.insert(ex_event.after_context_event.id);
        }
    }

    return dying_info;
}
245
253bool is_phase_discarded(TransactionPhase phase, const FailingContexts& failures)
254{
255 // Note that app logic also gets discarded if teardown failures
256 return (phase == TransactionPhase::APP_LOGIC && (failures.app_logic_failure || failures.teardown_failure)) ||
257 (phase == TransactionPhase::TEARDOWN && failures.teardown_failure);
258}
259
// Returns the id of the "dying context" for `phase`: the top-level context
// whose failure causes the phase's side effects to be discarded, or 0 when
// nothing in that phase is discarded.
// NOTE(review): this rendering elides the opening line of an assertion macro
// (only its message string survives below) and both `case` labels of the
// switch — the first group corresponds to APP_LOGIC and the bare ternary
// return to TEARDOWN. Restore them from the original source.
uint32_t dying_context_for_phase(TransactionPhase phase, const FailingContexts& failures)
{
        "Execution events must have app logic or teardown phase");

    switch (phase) {
        // App logic's own failure takes precedence for the dying context id.
        if (failures.app_logic_failure) {
            return failures.app_logic_exit_context_id;
        }

        // Note that app logic also gets discarded if teardown fails.
        if (failures.teardown_failure) {
            return failures.teardown_exit_context_id;
        }

        return 0;
    }
        return failures.teardown_failure ? failures.teardown_exit_context_id : 0;
    default:
        __builtin_unreachable(); // tell the compiler "we never reach here"
    }
}
291
292} // namespace
293
296{
297 uint32_t row = 1; // We start from row 1 because this trace contains shifted columns.
298
299 // Preprocess events to determine which contexts will fail
300 const FailingContexts failures = preprocess_for_discard(ex_events);
301
302 // Some variables updated per loop iteration to track
303 // whether or not the upcoming row should "discard" [side effects].
304 uint32_t dying_context_id = 0;
305 // dying_context_id captures whether we discard or not. Namely, discard == 1 <=> dying_context_id != 0
306 // is a circuit invariant. For this reason, we use a lambda to preserve the invariant.
307 auto is_discarding = [&dying_context_id]() { return dying_context_id != 0; };
308 bool is_first_event_in_enqueued_call = true;
309 bool prev_row_was_enter_call = false;
310
311 for (const auto& ex_event : ex_events) {
312 // Check if this is the first event in an enqueued call and whether
313 // the phase should be discarded
314 if (!is_discarding() && is_first_event_in_enqueued_call &&
315 is_phase_discarded(ex_event.after_context_event.phase, failures)) {
316 dying_context_id = dying_context_for_phase(ex_event.after_context_event.phase, failures);
317 }
318
319 const bool has_parent = ex_event.after_context_event.parent_id != 0;
320
321 /**************************************************************************************************
322 * Setup.
323 **************************************************************************************************/
324
325 trace.set(
326 row,
327 { {
328 { C::execution_sel, 1 },
329 // Selectors that indicate "dispatch" from tx trace
330 // Note: Enqueued Call End is determined during the opcode execution temporality group
331 { C::execution_enqueued_call_start, is_first_event_in_enqueued_call ? 1 : 0 },
332 // Context
333 { C::execution_context_id, ex_event.after_context_event.id },
334 { C::execution_parent_id, ex_event.after_context_event.parent_id },
335 { C::execution_pc, ex_event.before_context_event.pc },
336 { C::execution_msg_sender, ex_event.after_context_event.msg_sender },
337 { C::execution_contract_address, ex_event.after_context_event.contract_addr },
338 { C::execution_transaction_fee, ex_event.after_context_event.transaction_fee },
339 { C::execution_is_static, ex_event.after_context_event.is_static },
340 { C::execution_parent_calldata_addr, ex_event.after_context_event.parent_cd_addr },
341 { C::execution_parent_calldata_size, ex_event.after_context_event.parent_cd_size },
342 { C::execution_last_child_returndata_addr, ex_event.after_context_event.last_child_rd_addr },
343 { C::execution_last_child_returndata_size, ex_event.after_context_event.last_child_rd_size },
344 { C::execution_last_child_success, ex_event.after_context_event.last_child_success },
345 { C::execution_last_child_id, ex_event.after_context_event.last_child_id },
346 { C::execution_l2_gas_limit, ex_event.after_context_event.gas_limit.l2_gas },
347 { C::execution_da_gas_limit, ex_event.after_context_event.gas_limit.da_gas },
348 { C::execution_l2_gas_used, ex_event.after_context_event.gas_used.l2_gas },
349 { C::execution_da_gas_used, ex_event.after_context_event.gas_used.da_gas },
350 { C::execution_parent_l2_gas_limit, ex_event.after_context_event.parent_gas_limit.l2_gas },
351 { C::execution_parent_da_gas_limit, ex_event.after_context_event.parent_gas_limit.da_gas },
352 { C::execution_parent_l2_gas_used, ex_event.after_context_event.parent_gas_used.l2_gas },
353 { C::execution_parent_da_gas_used, ex_event.after_context_event.parent_gas_used.da_gas },
354 { C::execution_next_context_id, ex_event.next_context_id },
355 // Context - gas.
356 { C::execution_prev_l2_gas_used, ex_event.before_context_event.gas_used.l2_gas },
357 { C::execution_prev_da_gas_used, ex_event.before_context_event.gas_used.da_gas },
358 // Context - tree states
359 // Context - tree states - Written public data slots tree
360 { C::execution_prev_written_public_data_slots_tree_root,
361 ex_event.before_context_event.written_public_data_slots_tree_snapshot.root },
362 { C::execution_prev_written_public_data_slots_tree_size,
363 ex_event.before_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
364 { C::execution_written_public_data_slots_tree_root,
365 ex_event.after_context_event.written_public_data_slots_tree_snapshot.root },
366 { C::execution_written_public_data_slots_tree_size,
367 ex_event.after_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
368 // Context - tree states - Nullifier tree
369 { C::execution_prev_nullifier_tree_root,
370 ex_event.before_context_event.tree_states.nullifier_tree.tree.root },
371 { C::execution_prev_nullifier_tree_size,
372 ex_event.before_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
373 { C::execution_prev_num_nullifiers_emitted,
374 ex_event.before_context_event.tree_states.nullifier_tree.counter },
375 { C::execution_nullifier_tree_root, ex_event.after_context_event.tree_states.nullifier_tree.tree.root },
376 { C::execution_nullifier_tree_size,
377 ex_event.after_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
378 { C::execution_num_nullifiers_emitted,
379 ex_event.after_context_event.tree_states.nullifier_tree.counter },
380 // Context - tree states - Public data tree
381 { C::execution_prev_public_data_tree_root,
382 ex_event.before_context_event.tree_states.public_data_tree.tree.root },
383 { C::execution_prev_public_data_tree_size,
384 ex_event.before_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
385 { C::execution_public_data_tree_root,
386 ex_event.after_context_event.tree_states.public_data_tree.tree.root },
387 { C::execution_public_data_tree_size,
388 ex_event.after_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
389 // Context - tree states - Note hash tree
390 { C::execution_prev_note_hash_tree_root,
391 ex_event.before_context_event.tree_states.note_hash_tree.tree.root },
392 { C::execution_prev_note_hash_tree_size,
393 ex_event.before_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
394 { C::execution_prev_num_note_hashes_emitted,
395 ex_event.before_context_event.tree_states.note_hash_tree.counter },
396 { C::execution_note_hash_tree_root, ex_event.after_context_event.tree_states.note_hash_tree.tree.root },
397 { C::execution_note_hash_tree_size,
398 ex_event.after_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
399 { C::execution_num_note_hashes_emitted,
400 ex_event.after_context_event.tree_states.note_hash_tree.counter },
401 // Context - tree states - L1 to L2 message tree
402 { C::execution_l1_l2_tree_root,
403 ex_event.after_context_event.tree_states.l1_to_l2_message_tree.tree.root },
404 // Context - tree states - Retrieved bytecodes tree
405 { C::execution_prev_retrieved_bytecodes_tree_root,
406 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.root },
407 { C::execution_prev_retrieved_bytecodes_tree_size,
408 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
409 { C::execution_retrieved_bytecodes_tree_root,
410 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.root },
411 { C::execution_retrieved_bytecodes_tree_size,
412 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
413 // Context - side effects
414 { C::execution_prev_num_unencrypted_log_fields, ex_event.before_context_event.numUnencryptedLogFields },
415 { C::execution_num_unencrypted_log_fields, ex_event.after_context_event.numUnencryptedLogFields },
416 { C::execution_prev_num_l2_to_l1_messages, ex_event.before_context_event.numL2ToL1Messages },
417 { C::execution_num_l2_to_l1_messages, ex_event.after_context_event.numL2ToL1Messages },
418 // Helpers for identifying parent context
419 { C::execution_has_parent_ctx, has_parent ? 1 : 0 },
420 { C::execution_is_parent_id_inv, ex_event.after_context_event.parent_id }, // Will be inverted in batch.
421 } });
422
423 // Internal stack
424 trace.set(row,
425 { {
426 { C::execution_internal_call_id, ex_event.before_context_event.internal_call_id },
427 { C::execution_internal_call_return_id, ex_event.before_context_event.internal_call_return_id },
428 { C::execution_next_internal_call_id, ex_event.before_context_event.next_internal_call_id },
429 } });
430
431 /**************************************************************************************************
432 * Temporality group 1: Bytecode retrieval.
433 **************************************************************************************************/
434
435 const bool bytecode_retrieval_failed = ex_event.error == ExecutionError::BYTECODE_RETRIEVAL;
436 const bool sel_first_row_in_context = prev_row_was_enter_call || is_first_event_in_enqueued_call;
437 trace.set(row,
438 { {
439 { C::execution_sel_first_row_in_context, sel_first_row_in_context ? 1 : 0 },
440 { C::execution_sel_bytecode_retrieval_failure, bytecode_retrieval_failed ? 1 : 0 },
441 { C::execution_sel_bytecode_retrieval_success, !bytecode_retrieval_failed ? 1 : 0 },
442 { C::execution_bytecode_id, ex_event.after_context_event.bytecode_id },
443 } });
444
445 /**************************************************************************************************
446 * Temporality group 2: Instruction fetching. Mapping from wire to execution and addressing.
447 **************************************************************************************************/
448
449 // This will only have a value if instruction fetching succeeded.
451 const bool error_in_instruction_fetching = ex_event.error == ExecutionError::INSTRUCTION_FETCHING;
452 const bool instruction_fetching_success = !bytecode_retrieval_failed && !error_in_instruction_fetching;
453 trace.set(C::execution_sel_instruction_fetching_failure, row, error_in_instruction_fetching ? 1 : 0);
454
455 if (instruction_fetching_success) {
456 exec_opcode = ex_event.wire_instruction.get_exec_opcode();
457 process_instr_fetching(ex_event.wire_instruction, trace, row);
458 // If we fetched an instruction successfully, we can set the next PC.
459 trace.set(row,
460 { {
461 { C::execution_next_pc,
462 static_cast<uint32_t>(ex_event.before_context_event.pc +
463 ex_event.wire_instruction.size_in_bytes()) },
464 } });
465
466 // Along this function we need to set the info we get from the #[EXEC_SPEC_READ] lookup.
467 process_execution_spec(ex_event, trace, row);
468
469 process_addressing(ex_event.addressing_event, ex_event.wire_instruction, trace, row);
470 }
471
472 const bool addressing_failed = ex_event.error == ExecutionError::ADDRESSING;
473
474 /**************************************************************************************************
475 * Temporality group 3: Registers read.
476 **************************************************************************************************/
477
478 // Note that if addressing did not fail, register reading will not fail.
480 std::ranges::fill(registers, MemoryValue::from_tag(static_cast<MemoryTag>(0), 0));
481 const bool should_process_registers = instruction_fetching_success && !addressing_failed;
482 const bool register_processing_failed = ex_event.error == ExecutionError::REGISTER_READ;
483 if (should_process_registers) {
485 *exec_opcode, ex_event.inputs, ex_event.output, registers, register_processing_failed, trace, row);
486 }
487
488 /**************************************************************************************************
489 * Temporality group 4: Gas (both base and dynamic).
490 **************************************************************************************************/
491
492 const bool should_check_gas = should_process_registers && !register_processing_failed;
493 if (should_check_gas) {
494 process_gas(ex_event.gas_event, *exec_opcode, trace, row);
495
496 // To_Radix Dynamic Gas Factor related selectors.
497 // We need the register information to compute dynamic gas factor and process_gas() does not have
498 // access to it and nor should it.
499 if (*exec_opcode == ExecutionOpCode::TORADIXBE) {
500 uint32_t radix = ex_event.inputs[1].as<uint32_t>(); // Safe since already tag checked
501 uint32_t num_limbs = ex_event.inputs[2].as<uint32_t>(); // Safe since already tag checked
502 uint32_t num_p_limbs = radix > 256 ? 32 : static_cast<uint32_t>(get_p_limbs_per_radix_size(radix));
503 trace.set(row,
504 { {
505 // To Radix BE Dynamic Gas
506 { C::execution_two_five_six, 256 },
507 { C::execution_sel_radix_gt_256, radix > 256 ? 1 : 0 },
508 { C::execution_sel_lookup_num_p_limbs, radix <= 256 ? 1 : 0 },
509 { C::execution_num_p_limbs, num_p_limbs },
510 { C::execution_sel_use_num_limbs, num_limbs > num_p_limbs ? 1 : 0 },
511 // Don't set dyn gas factor here since already set in process_gas
512 } });
513 }
514 }
515
516 const bool oog = ex_event.error == ExecutionError::GAS;
517 /**************************************************************************************************
518 * Temporality group 5: Opcode execution.
519 **************************************************************************************************/
520
521 const bool should_execute_opcode = should_check_gas && !oog;
522
523 // These booleans are used after of the "opcode code execution" block but need
524 // to be set as part of the "opcode code execution" block.
525 bool sel_enter_call = false;
526 bool sel_exit_call = false;
527 bool should_execute_revert = false;
528
529 const bool opcode_execution_failed = ex_event.error == ExecutionError::OPCODE_EXECUTION;
530 if (should_execute_opcode) {
531 // At this point we can assume instruction fetching succeeded, so this should never fail.
532 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(*exec_opcode);
533 trace.set(row,
534 { {
535 { C::execution_sel_should_execute_opcode, 1 },
536 { C::execution_sel_opcode_error, opcode_execution_failed ? 1 : 0 },
537 { get_subtrace_selector(dispatch_to_subtrace.subtrace_selector), 1 },
538 } });
539
540 // Execution Trace opcodes - separating for clarity
541 if (dispatch_to_subtrace.subtrace_selector == SubtraceSel::EXECUTION) {
542 trace.set(get_execution_opcode_selector(*exec_opcode), row, 1);
543 }
544
545 // Execution trace opcodes specific logic.
546 // Note that the opcode selectors were set above. (e.g., sel_execute_call, sel_execute_static_call, ..).
547 if (*exec_opcode == ExecutionOpCode::CALL || *exec_opcode == ExecutionOpCode::STATICCALL) {
548 sel_enter_call = true;
549
550 const Gas gas_left = ex_event.after_context_event.gas_limit - ex_event.after_context_event.gas_used;
551
552 uint32_t allocated_l2_gas = registers[0].as<uint32_t>();
553 bool is_l2_gas_left_gt_allocated = gas_left.l2_gas > allocated_l2_gas;
554
555 uint32_t allocated_da_gas = registers[1].as<uint32_t>();
556 bool is_da_gas_left_gt_allocated = gas_left.da_gas > allocated_da_gas;
557
558 trace.set(row,
559 { {
560 { C::execution_sel_enter_call, 1 },
561 { C::execution_l2_gas_left, gas_left.l2_gas },
562 { C::execution_da_gas_left, gas_left.da_gas },
563 { C::execution_is_l2_gas_left_gt_allocated, is_l2_gas_left_gt_allocated ? 1 : 0 },
564 { C::execution_is_da_gas_left_gt_allocated, is_da_gas_left_gt_allocated ? 1 : 0 },
565 } });
566 } else if (*exec_opcode == ExecutionOpCode::RETURN) {
567 sel_exit_call = true;
568 trace.set(row,
569 { {
570 { C::execution_nested_return, has_parent ? 1 : 0 },
571 } });
572 } else if (*exec_opcode == ExecutionOpCode::REVERT) {
573 sel_exit_call = true;
574 should_execute_revert = true;
575 } else if (exec_opcode == ExecutionOpCode::GETENVVAR) {
576 BB_ASSERT_EQ(ex_event.addressing_event.resolution_info.size(),
577 static_cast<size_t>(2),
578 "GETENVVAR should have exactly two resolved operands (envvar enum and output)");
579 // rop[1] is the envvar enum
580 Operand envvar_enum = ex_event.addressing_event.resolution_info[1].resolved_operand;
581 process_get_env_var_opcode(envvar_enum, ex_event.output, trace, row);
582 } else if (*exec_opcode == ExecutionOpCode::INTERNALRETURN) {
583 if (!opcode_execution_failed) {
584 // If we have an opcode error, we don't need to compute the inverse (see internal_call.pil)
585 trace.set(
586 C::execution_internal_call_return_id_inv,
587 row,
588 ex_event.before_context_event.internal_call_return_id); // Will be inverted in batch later.
589 trace.set(C::execution_sel_read_unwind_call_stack, row, 1);
590 }
591 } else if (*exec_opcode == ExecutionOpCode::SSTORE) {
592 // Equivalent to PIL's (MAX + INITIAL_SIZE - prev_written_public_data_slots_tree_size)
593 // since prev_size = counter + 1 and INITIAL_SIZE = 1.
594 uint32_t remaining_data_writes = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX -
595 ex_event.before_context_event.tree_states.public_data_tree.counter;
596
597 trace.set(row,
598 { {
599 { C::execution_max_data_writes_reached, remaining_data_writes == 0 },
600 { C::execution_remaining_data_writes_inv,
601 remaining_data_writes }, // Will be inverted in batch later.
602 { C::execution_sel_write_public_data, !opcode_execution_failed },
603 } });
604 } else if (*exec_opcode == ExecutionOpCode::NOTEHASHEXISTS) {
605 uint64_t leaf_index = registers[1].as<uint64_t>();
606 uint64_t note_hash_tree_leaf_count = NOTE_HASH_TREE_LEAF_COUNT;
607 bool note_hash_leaf_in_range = leaf_index < note_hash_tree_leaf_count;
608
609 trace.set(row,
610 { {
611 { C::execution_note_hash_leaf_in_range, note_hash_leaf_in_range },
612 { C::execution_note_hash_tree_leaf_count, FF(note_hash_tree_leaf_count) },
613 } });
614 } else if (*exec_opcode == ExecutionOpCode::EMITNOTEHASH) {
615 uint32_t remaining_note_hashes =
616 MAX_NOTE_HASHES_PER_TX - ex_event.before_context_event.tree_states.note_hash_tree.counter;
617
618 trace.set(row,
619 { {
620 { C::execution_sel_reached_max_note_hashes, remaining_note_hashes == 0 },
621 { C::execution_remaining_note_hashes_inv,
622 remaining_note_hashes }, // Will be inverted in batch later.
623 { C::execution_sel_write_note_hash, !opcode_execution_failed },
624 } });
625 } else if (*exec_opcode == ExecutionOpCode::L1TOL2MSGEXISTS) {
626 uint64_t leaf_index = registers[1].as<uint64_t>();
627 uint64_t l1_to_l2_msg_tree_leaf_count = L1_TO_L2_MSG_TREE_LEAF_COUNT;
628 bool l1_to_l2_msg_leaf_in_range = leaf_index < l1_to_l2_msg_tree_leaf_count;
629
630 trace.set(row,
631 { {
632 { C::execution_l1_to_l2_msg_leaf_in_range, l1_to_l2_msg_leaf_in_range },
633 { C::execution_l1_to_l2_msg_tree_leaf_count, FF(l1_to_l2_msg_tree_leaf_count) },
634 } });
635 //} else if (exec_opcode == ExecutionOpCode::NULLIFIEREXISTS) {
636 // no custom columns!
637 } else if (*exec_opcode == ExecutionOpCode::EMITNULLIFIER) {
638 uint32_t remaining_nullifiers =
639 MAX_NULLIFIERS_PER_TX - ex_event.before_context_event.tree_states.nullifier_tree.counter;
640
641 trace.set(row,
642 { {
643 { C::execution_sel_reached_max_nullifiers, remaining_nullifiers == 0 },
644 { C::execution_remaining_nullifiers_inv,
645 remaining_nullifiers }, // Will be inverted in batch later.
646 { C::execution_sel_write_nullifier,
647 remaining_nullifiers != 0 && !ex_event.before_context_event.is_static },
648 } });
649 } else if (*exec_opcode == ExecutionOpCode::SENDL2TOL1MSG) {
650 uint32_t remaining_l2_to_l1_msgs =
651 MAX_L2_TO_L1_MSGS_PER_TX - ex_event.before_context_event.numL2ToL1Messages;
652
653 trace.set(row,
654 { { { C::execution_sel_l2_to_l1_msg_limit_error, remaining_l2_to_l1_msgs == 0 },
655 { C::execution_remaining_l2_to_l1_msgs_inv,
656 remaining_l2_to_l1_msgs }, // Will be inverted in batch later.
657 { C::execution_sel_write_l2_to_l1_msg, !opcode_execution_failed && !is_discarding() },
658 {
659 C::execution_public_inputs_index,
661 ex_event.before_context_event.numL2ToL1Messages,
662 } } });
663 }
664 }
665
666 /**************************************************************************************************
667 * Temporality group 6: Register write.
668 **************************************************************************************************/
669
670 const bool should_process_register_write = should_execute_opcode && !opcode_execution_failed;
671 if (should_process_register_write) {
672 process_registers_write(*exec_opcode, trace, row);
673 }
674
675 /**************************************************************************************************
676 * Discarding and error related selectors.
677 **************************************************************************************************/
678
679 const bool is_dying_context = ex_event.after_context_event.id == dying_context_id;
680 // Need to generate the item below for checking "is dying context" in circuit
681 // No need to condition by `!is_dying_context` as batch inversion skips 0.
682 const FF dying_context_diff = FF(ex_event.after_context_event.id) - FF(dying_context_id);
683
684 // This is here instead of guarded by `should_execute_opcode` because is_err is a higher level error
685 // than just an opcode error (i.e., it is on if there are any errors in any temporality group).
686 const bool is_err = ex_event.error != ExecutionError::NONE;
687 sel_exit_call = sel_exit_call || is_err; // sel_execute_revert || sel_execute_return || sel_error
688 const bool is_failure = should_execute_revert || is_err;
689 const bool nested_exit_call = sel_exit_call && has_parent;
690 const bool enqueued_call_end = sel_exit_call && !has_parent;
691 const bool nested_failure = is_failure && has_parent;
692
693 trace.set(row,
694 { {
695 { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
696 { C::execution_nested_exit_call, nested_exit_call ? 1 : 0 },
697 { C::execution_nested_failure, nested_failure ? 1 : 0 },
698 { C::execution_sel_error, is_err ? 1 : 0 },
699 { C::execution_sel_failure, is_failure ? 1 : 0 },
700 { C::execution_discard, is_discarding() ? 1 : 0 },
701 { C::execution_dying_context_id, dying_context_id },
702 { C::execution_dying_context_id_inv, dying_context_id }, // Will be inverted in batch.
703 { C::execution_is_dying_context, is_dying_context ? 1 : 0 },
704 { C::execution_dying_context_diff_inv, dying_context_diff }, // Will be inverted in batch.
705 { C::execution_enqueued_call_end, enqueued_call_end ? 1 : 0 },
706 } });
707
708 // Trace-generation is done for this event.
709 // Now, use this event to determine whether we should set/reset the discard flag for the NEXT event.
710 // Note: is_failure implies discard is true.
711 const bool event_kills_dying_context = is_failure && is_dying_context;
712
713 if (event_kills_dying_context) {
714 // Set/unset discard flag if the current event is the one that kills the dying context
715 dying_context_id = 0;
716 } else if (sel_enter_call && !is_discarding() &&
717 failures.does_context_fail.contains(ex_event.next_context_id)) {
718 // If making a nested call, and discard isn't already high...
719 // if the nested context being entered eventually dies, we set which context is dying (implicitly raise
720 // discard flag). NOTE: If a [STATIC]CALL instruction _itself_ errors, we don't set the discard flag
721 // because we aren't actually entering a new context. This is already captured by `sel_enter_call`
722 // boolean which is set to true only during opcode execution temporality group which cannot
723 // fail for CALL/STATICALL.
724 dying_context_id = ex_event.next_context_id;
725 }
726 // Otherwise, we aren't entering or exiting a dying context,
727 // so just propagate discard and dying context.
728 // Implicit: dying_context_id = dying_context_id; discard = discard;
729
730 // If an enqueued call just exited, next event (if any) is the first in an enqueued call.
731 // Update flag for next iteration.
732 is_first_event_in_enqueued_call = !has_parent && sel_exit_call;
733
734 // Track this bool for use determining whether the next row is the first in a context
735 prev_row_was_enter_call = sel_enter_call;
736
737 row++;
738 }
739
740 // Batch invert the columns.
742}
743
// Fills the instruction-fetching columns of one execution row: marks fetching as
// successful, records the execution opcode, the raw indirect flags, and the
// instruction's byte length, then writes every operand into its dedicated column.
// NOTE(review): the opening signature line is elided in this listing; per the
// declaration it is process_instr_fetching(const simulation::Instruction&,
// TraceContainer&, uint32_t).
745 TraceContainer& trace,
746 uint32_t row)
747{
748 trace.set(row,
749 { {
750 { C::execution_sel_instruction_fetching_success, 1 },
751 { C::execution_ex_opcode, static_cast<uint8_t>(instruction.get_exec_opcode()) },
752 { C::execution_indirect, instruction.indirect },
753 { C::execution_instr_length, instruction.size_in_bytes() },
754 } });
755
756 // At this point we can assume instruction fetching succeeded.
// Copy the operands so we can pad the local copy without mutating the event.
757 auto operands = instruction.operands;
758 BB_ASSERT_LTE(operands.size(), static_cast<size_t>(AVM_MAX_OPERANDS), "Operands size is out of range");
// Pad with FF(0) so the loop below sets all AVM_MAX_OPERANDS operand columns.
759 operands.resize(AVM_MAX_OPERANDS, Operand::from<FF>(0));
760
761 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
762 trace.set(OPERAND_COLUMNS[i], row, operands.at(i));
763 }
764}
765
// Populates the per-opcode *static* specification columns for one row: gas
// costs, per-register metadata (read/write, active, expected tag), which
// operands are memory addresses, and the subtrace dispatch identifiers.
// All values are looked up from the precomputed execution-instruction spec.
// NOTE(review): the opening signature line is elided in this listing; per the
// declaration it is process_execution_spec(const simulation::ExecutionEvent&,
// TraceContainer&, uint32_t).
767 TraceContainer& trace,
768 uint32_t row)
769{
770 // At this point we can assume instruction fetching succeeded, so this should never fail.
771 ExecutionOpCode exec_opcode = ex_event.wire_instruction.get_exec_opcode();
772 const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
773 const auto& gas_cost = exec_spec.gas_cost;
774
775 // Gas.
776 trace.set(row,
777 { {
778 { C::execution_opcode_gas, gas_cost.opcode_gas },
779 { C::execution_base_da_gas, gas_cost.base_da },
780 { C::execution_dynamic_l2_gas, gas_cost.dyn_l2 },
781 { C::execution_dynamic_da_gas, gas_cost.dyn_da },
782 } });
783
// Per-register spec: write flag, active (memory-op) flag, and tag-check info.
// The expected-tag column is only meaningful when a tag check is required.
784 const auto& register_info = exec_spec.register_info;
785 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
786 trace.set(row,
787 { {
788 { REGISTER_IS_WRITE_COLUMNS[i], register_info.is_write(i) ? 1 : 0 },
789 { REGISTER_MEM_OP_COLUMNS[i], register_info.is_active(i) ? 1 : 0 },
790 { REGISTER_EXPECTED_TAG_COLUMNS[i],
791 register_info.need_tag_check(i) ? static_cast<uint32_t>(*(register_info.expected_tag(i))) : 0 },
792 { REGISTER_TAG_CHECK_COLUMNS[i], register_info.need_tag_check(i) ? 1 : 0 },
793 } });
794 }
795
796 // Set is_address columns
// Only the first `num_addresses` operands are memory addresses; the remaining
// selector columns default to 0.
797 const auto& num_addresses = exec_spec.num_addresses;
798 for (size_t i = 0; i < num_addresses; i++) {
799 trace.set(OPERAND_IS_ADDRESS_COLUMNS[i], row, 1);
800 }
801
802 // At this point we can assume instruction fetching succeeded, so this should never fail.
// Record which subtrace (and operation within it) this opcode dispatches to.
803 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(exec_opcode);
804 trace.set(row,
805 { {
806 { C::execution_subtrace_id, get_subtrace_id(dispatch_to_subtrace.subtrace_selector) },
807 { C::execution_subtrace_operation_id, dispatch_to_subtrace.subtrace_operation_id },
808 { C::execution_dyn_gas_id, exec_spec.dyn_gas_id },
809 } });
810}
811
// Populates the gas-accounting columns for one row from the simulation's gas
// event: out-of-gas flags (L2/DA and their OR), the addressing gas charged,
// the dynamic gas factors, and the cumulative gas used. Also raises the
// opcode-specific dynamic-gas selector when the spec defines one.
// NOTE(review): the opening signature line is elided in this listing; per the
// declaration it is process_gas(const simulation::GasEvent&, ExecutionOpCode,
// TraceContainer&, uint32_t).
813 ExecutionOpCode exec_opcode,
814 TraceContainer& trace,
815 uint32_t row)
816{
// Out of gas if either the L2 or the DA dimension ran out.
817 bool oog = gas_event.oog_l2 || gas_event.oog_da;
818 trace.set(row,
819 { {
820 { C::execution_sel_should_check_gas, 1 },
821 { C::execution_out_of_gas_l2, gas_event.oog_l2 ? 1 : 0 },
822 { C::execution_out_of_gas_da, gas_event.oog_da ? 1 : 0 },
823 { C::execution_sel_out_of_gas, oog ? 1 : 0 },
824 // Addressing gas.
825 { C::execution_addressing_gas, gas_event.addressing_gas },
826 // Dynamic gas.
827 { C::execution_dynamic_l2_gas_factor, gas_event.dynamic_gas_factor.l2_gas },
828 { C::execution_dynamic_da_gas_factor, gas_event.dynamic_gas_factor.da_gas },
829 // Derived cumulative gas used.
830 { C::execution_total_gas_l2, gas_event.total_gas_used_l2 },
831 { C::execution_total_gas_da, gas_event.total_gas_used_da },
832 } });
833
// dyn_gas_id == 0 means "no dynamic gas selector" for this opcode.
834 const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
835 if (exec_spec.dyn_gas_id != 0) {
836 trace.set(get_dyn_gas_selector(exec_spec.dyn_gas_id), row, 1);
837 }
838}
839
// Populates the operand-addressing columns for one row: per-operand
// relative/indirect flags, post-relative values, resolved operands and tags,
// plus the error-collection and inverse-helper columns consumed by the
// addressing relations in the PIL file.
// NOTE(review): the opening signature line is elided in this listing; per the
// declaration it is process_addressing(const simulation::AddressingEvent&,
// const simulation::Instruction&, TraceContainer&, uint32_t).
842 TraceContainer& trace,
843 uint32_t row)
844{
845 // At this point we can assume instruction fetching succeeded, so this should never fail.
846 ExecutionOpCode exec_opcode = instruction.get_exec_opcode();
847 const ExecInstructionSpec& ex_spec = get_exec_instruction_spec().at(exec_opcode);
848
// Copy resolution info so it can be padded locally.
// NOTE(review): an elided line here (original 850) opens a BB_ASSERT_LTE over
// resolution_info_vec.size() — the next line is its continuation.
849 auto resolution_info_vec = addr_event.resolution_info;
851 resolution_info_vec.size(), static_cast<size_t>(AVM_MAX_OPERANDS), "Resolution info size is out of range");
852 // Pad with default values for the missing operands.
853 resolution_info_vec.resize(AVM_MAX_OPERANDS,
854 {
855 // This is the default we want: both tag and value 0.
856 .after_relative = FF::zero(),
857 .resolved_operand = Operand::from_tag(static_cast<ValueTag>(0), 0),
858 .error = std::nullopt,
859 });
860
// Per-operand scratch arrays filled by the gathering loop below.
// NOTE(review): elided lines here (original 862-863, 866) declare the
// relative_oob / is_relative / is_indirect arrays used below — confirm against
// the real source.
861 std::array<bool, AVM_MAX_OPERANDS> should_apply_indirection{};
864 std::array<bool, AVM_MAX_OPERANDS> is_relative_effective{};
865 std::array<bool, AVM_MAX_OPERANDS> is_indirect_effective{};
867 std::array<FF, AVM_MAX_OPERANDS> after_relative{};
868 std::array<FF, AVM_MAX_OPERANDS> resolved_operand{};
869 std::array<uint8_t, AVM_MAX_OPERANDS> resolved_operand_tag{};
870 uint8_t num_relative_operands = 0;
871
872 // The error about the base address being invalid is stored in every resolution_info member when it happens.
873 bool base_address_invalid = resolution_info_vec[0].error.has_value() &&
874 *resolution_info_vec[0].error == AddressingEventError::BASE_ADDRESS_INVALID;
875 bool do_base_check = false; // Whether we need to retrieve the base address,
876 // i.e., at least one operand is relative.
877
878 // Gather operand information.
879 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
880 const auto& resolution_info = resolution_info_vec[i];
// The wire-level relative/indirect bits only take effect on address operands.
881 bool op_is_address = i < ex_spec.num_addresses;
882 relative_oob[i] = resolution_info.error.has_value() &&
883 *resolution_info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB;
884 is_relative[i] = is_operand_relative(instruction.indirect, i);
885 is_indirect[i] = is_operand_indirect(instruction.indirect, i);
886 is_relative_effective[i] = op_is_address && is_relative[i];
887 is_indirect_effective[i] = op_is_address && is_indirect[i];
// Indirection is only applied when no earlier addressing step already failed.
888 should_apply_indirection[i] = is_indirect_effective[i] && !relative_oob[i] && !base_address_invalid;
889 resolved_operand_tag[i] = static_cast<uint8_t>(resolution_info.resolved_operand.get_tag());
890 after_relative[i] = resolution_info.after_relative;
891 resolved_operand[i] = resolution_info.resolved_operand;
892 if (is_relative_effective[i]) {
893 do_base_check = true;
894 num_relative_operands++;
895 }
896 }
897
898 BB_ASSERT(do_base_check || !base_address_invalid, "Base address is invalid but we are not checking it.");
899
900 // Set the operand columns.
901 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
902 trace.set(row,
903 { {
904 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative[i] ? 1 : 0 },
905 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect[i] ? 1 : 0 },
906 { OPERAND_RELATIVE_OVERFLOW_COLUMNS[i], relative_oob[i] ? 1 : 0 },
907 { OPERAND_AFTER_RELATIVE_COLUMNS[i], after_relative[i] },
908 { OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS[i], should_apply_indirection[i] ? 1 : 0 },
909 { OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS[i],
910 (is_relative_effective[i] && !base_address_invalid) ? 1 : 0 },
911 { RESOLVED_OPERAND_COLUMNS[i], resolved_operand[i] },
912 { RESOLVED_OPERAND_TAG_COLUMNS[i], resolved_operand_tag[i] },
913 } });
914 }
915
916 // We need to compute relative and indirect over the whole 16 bits of the indirect flag.
917 // See comment in PIL file about indirect upper bits.
918 for (size_t i = AVM_MAX_OPERANDS; i < TOTAL_INDIRECT_BITS / 2; i++) {
919 bool is_relative = is_operand_relative(instruction.indirect, i);
920 bool is_indirect = is_operand_indirect(instruction.indirect, i);
921 trace.set(row,
922 { {
923 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative ? 1 : 0 },
924 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect ? 1 : 0 },
925 } });
926 }
927
928 // Inverse of following difference is required when base address is invalid.
// Proves base tag != U32 in-circuit; 0 when the base address is valid.
929 FF base_address_tag_diff = base_address_invalid ? FF(static_cast<uint8_t>(addr_event.base_address.get_tag())) -
930 FF(static_cast<uint8_t>(MemoryTag::U32))
931 : 0;
932
933 // Tag check after indirection.
934 bool some_final_check_failed = std::ranges::any_of(addr_event.resolution_info, [](const auto& info) {
935 return info.error.has_value() && *info.error == AddressingEventError::INVALID_ADDRESS_AFTER_INDIRECTION;
936 });
// Batch all per-operand tag differences into one field element, 3 bits per
// operand (hence the factor 2^3), so a single inverse witnesses the failure.
937 FF batched_tags_diff = 0;
938 if (some_final_check_failed) {
939 FF power_of_2 = 1;
940 for (size_t i = 0; i < AVM_MAX_OPERANDS; ++i) {
941 if (should_apply_indirection[i]) {
942 batched_tags_diff += power_of_2 * (FF(resolved_operand_tag[i]) - FF(MEM_TAG_U32));
943 }
944 power_of_2 *= 8; // 2^3
945 }
946 }
947
948 // Collect addressing errors. See PIL file for reference.
// Sums indicator terms (base invalid, relative overflows, post-indirection
// failures); nonzero iff some addressing error occurred.
949 bool addressing_failed =
950 std::ranges::any_of(addr_event.resolution_info, [](const auto& info) { return info.error.has_value(); });
951 FF addressing_error_collection =
952 addressing_failed
953 ? FF(
954 // Base address invalid.
955 (base_address_invalid ? 1 : 0) +
956 // Relative overflow.
957 std::accumulate(addr_event.resolution_info.begin(),
958 addr_event.resolution_info.end(),
959 static_cast<uint32_t>(0),
960 [](uint32_t acc, const auto& info) {
961 return acc +
962 (info.error.has_value() &&
963 *info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB
964 ? 1
965 : 0);
966 }) +
967 // Some invalid address after indirection.
968 (some_final_check_failed ? 1 : 0))
969 : 0;
970
971 trace.set(
972 row,
973 { {
974 { C::execution_sel_addressing_error, addressing_failed ? 1 : 0 },
975 { C::execution_addressing_error_collection_inv, addressing_error_collection }, // Will be inverted in batch.
976 { C::execution_base_address_val, addr_event.base_address.as_ff() },
977 { C::execution_base_address_tag, static_cast<uint8_t>(addr_event.base_address.get_tag()) },
978 { C::execution_base_address_tag_diff_inv, base_address_tag_diff }, // Will be inverted in batch.
979 { C::execution_batched_tags_diff_inv, batched_tags_diff }, // Will be inverted in batch.
980 { C::execution_sel_some_final_check_failed, some_final_check_failed ? 1 : 0 },
981 { C::execution_sel_base_address_failure, base_address_invalid ? 1 : 0 },
982 { C::execution_num_relative_operands_inv, num_relative_operands }, // Will be inverted in batch later.
983 { C::execution_sel_do_base_check, do_base_check ? 1 : 0 },
984 { C::execution_highest_address, AVM_HIGHEST_MEM_ADDRESS },
985 } });
986}
987
// Batch-inverts every "*_inv" helper column in one pass. The process_* methods
// above store the raw (to-be-inverted) values in these columns; invert_columns
// replaces each nonzero entry with its field inverse (batch inversion skips 0).
// NOTE(review): the signature line is elided in this listing — presumably a
// member taking the TraceContainer; confirm against the real source.
989{
990 trace.invert_columns({ {
991 // Registers.
992 C::execution_batched_tags_diff_inv_reg,
993 // Context.
994 C::execution_is_parent_id_inv,
995 C::execution_internal_call_return_id_inv,
996 // Trees.
997 C::execution_remaining_data_writes_inv,
998 C::execution_remaining_note_hashes_inv,
999 C::execution_remaining_nullifiers_inv,
1000 // L1ToL2MsgExists.
1001 C::execution_remaining_l2_to_l1_msgs_inv,
1002 // Discard.
1003 C::execution_dying_context_id_inv,
1004 C::execution_dying_context_diff_inv,
1005 // Addressing.
1006 C::execution_addressing_error_collection_inv,
1007 C::execution_batched_tags_diff_inv,
1008 C::execution_base_address_tag_diff_inv,
1009 C::execution_num_relative_operands_inv,
1010 } });
1011}
1012
// Fills the register columns for one row: routes simulation inputs into read
// registers and the output into write registers, records each register's value
// and tag, raises the effective-read selectors, and batches tag-check
// differences for the register-read error witness.
// NOTE(review): the opening signature lines are elided in this listing; per the
// declaration it is process_registers(ExecutionOpCode, const
// std::vector<MemoryValue>&, const MemoryValue&, std::span<MemoryValue>, bool,
// TraceContainer&, uint32_t).
1015 const MemoryValue& output,
1017 bool register_processing_failed,
1018 TraceContainer& trace,
1019 uint32_t row)
1020{
1021 BB_ASSERT_EQ(registers.size(), static_cast<size_t>(AVM_MAX_REGISTERS), "Registers size is out of range");
1022 // At this point we can assume instruction fetching succeeded, so this should never fail.
1023 const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
1024
1025 // Registers. We set all of them here, even the write ones. This is fine because
1026 // if an error occured before the register write group, simulation would pass the default
1027 // value-tag (0, 0). Furthermore, the permutation of the memory write would not be activated.
// Reads consume `inputs` in order; `input_counter` tracks how many were used.
1028 size_t input_counter = 0;
1029 for (uint8_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
1030 if (register_info.is_active(i)) {
1031 if (register_info.is_write(i)) {
1032 // If this is a write operation, we need to get the value from the output.
1033 registers[i] = output;
1034 } else {
1035 // If this is a read operation, we need to get the value from the input.
1036
1037 // Register specifications must be consistent with the number of inputs.
1038 BB_ASSERT(inputs.size() > input_counter, "Not enough inputs for register read");
1039
1040 registers[i] = inputs.at(input_counter);
1041 input_counter++;
1042 }
1043 }
1044 }
1045
1046 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1047 trace.set(REGISTER_COLUMNS[i], row, registers[i]);
1048 trace.set(REGISTER_MEM_TAG_COLUMNS[i], row, static_cast<uint8_t>(registers[i].get_tag()));
1049 // This one is special because it sets the reads (but not the writes).
1050 // If we got here, sel_should_read_registers=1.
1051 if (register_info.is_active(i) && !register_info.is_write(i)) {
1052 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
1053 }
1054 }
1055
// Batch all register tag differences into one field element, 3 bits per
// register (factor 2^3), so a single inverse witnesses the tag-check failure.
1056 FF batched_tags_diff_reg = 0;
1057 if (register_processing_failed) {
1058 FF power_of_2 = 1;
1059 for (size_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
1060 if (register_info.need_tag_check(i)) {
1061 batched_tags_diff_reg += power_of_2 * (FF(static_cast<uint8_t>(registers[i].get_tag())) -
1062 FF(static_cast<uint8_t>(*register_info.expected_tag(i))));
1063 }
1064 power_of_2 *= 8; // 2^3
1065 }
1066 }
1067
1068 trace.set(row,
1069 { {
1070 { C::execution_sel_should_read_registers, 1 },
1071 { C::execution_batched_tags_diff_inv_reg, batched_tags_diff_reg }, // Will be inverted in batch.
1072 { C::execution_sel_register_read_error, register_processing_failed ? 1 : 0 },
1073 } });
1074}
1075
// Raises the register-write selectors for one row: the global
// sel_should_write_registers flag plus the effective-op selector for every
// register the spec marks as an active write.
// NOTE(review): the signature line is elided in this listing; per the
// declaration it is process_registers_write(ExecutionOpCode, TraceContainer&,
// uint32_t).
1077{
1078 const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
1079 trace.set(C::execution_sel_should_write_registers, row, 1);
1080
1081 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1082 // This one is special because it sets the writes.
1083 // If we got here, sel_should_write_registers=1.
1084 if (register_info.is_active(i) && register_info.is_write(i)) {
1085 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
1086 }
1087 }
1088}
1089
// Populates the GETENVVAR-specific columns for one row from the precomputed
// env-var spec table: the public-inputs lookup selectors and row index, the
// one-hot per-variable selectors, the looked-up value (only when a PI lookup
// column is active), and the output memory tag.
// NOTE(review): the opening signature line is elided in this listing; per the
// declaration it is process_get_env_var_opcode(simulation::Operand,
// MemoryValue, TraceContainer&, uint32_t).
1091 MemoryValue output,
1092 TraceContainer& trace,
1093 uint32_t row)
1094{
// The env-var enum operand is expected to arrive as a U8.
1095 BB_ASSERT_EQ(envvar_enum.get_tag(), ValueTag::U8, "Envvar enum tag is not U8");
1096 const auto& envvar_spec = GetEnvVarSpec::get_table(envvar_enum.as<uint8_t>());
1097
1098 trace.set(row,
1099 { {
1100 { C::execution_sel_execute_get_env_var, 1 },
1101 { C::execution_sel_envvar_pi_lookup_col0, envvar_spec.envvar_pi_lookup_col0 ? 1 : 0 },
1102 { C::execution_sel_envvar_pi_lookup_col1, envvar_spec.envvar_pi_lookup_col1 ? 1 : 0 },
1103 { C::execution_envvar_pi_row_idx, envvar_spec.envvar_pi_row_idx },
1104 { C::execution_is_address, envvar_spec.is_address ? 1 : 0 },
1105 { C::execution_is_sender, envvar_spec.is_sender ? 1 : 0 },
1106 { C::execution_is_transactionfee, envvar_spec.is_transactionfee ? 1 : 0 },
1107 { C::execution_is_isstaticcall, envvar_spec.is_isstaticcall ? 1 : 0 },
1108 { C::execution_is_l2gasleft, envvar_spec.is_l2gasleft ? 1 : 0 },
1109 { C::execution_is_dagasleft, envvar_spec.is_dagasleft ? 1 : 0 },
1110 { C::execution_value_from_pi,
1111 envvar_spec.envvar_pi_lookup_col0 || envvar_spec.envvar_pi_lookup_col1 ? output.as_ff() : 0 },
1112 { C::execution_mem_tag_reg_0_, envvar_spec.out_tag },
1113 } });
1114}
1115
// Static registry of every lookup/permutation interaction the execution trace
// participates in, grouped by feature area (fetching, addressing, gas, context,
// opcodes, subtrace dispatch).
// NOTE(review): the opening lines of this definition and several interleaved
// .add<...>() entries are elided in this listing (Doxygen dropped hyperlinked
// lines) — confirm the full list against the real source.
1118 // Execution specification (precomputed)
1120 // Bytecode retrieval
1121 .add<lookup_execution_bytecode_retrieval_result_settings, InteractionType::LookupGeneric>()
1122 // Instruction fetching
1124 .add<lookup_execution_instruction_fetching_body_settings, InteractionType::LookupGeneric>()
1125 // Addressing
1127 .add<lookup_addressing_relative_overflow_result_1_settings, InteractionType::LookupGeneric>(C::gt_sel)
1129 .add<lookup_addressing_relative_overflow_result_3_settings, InteractionType::LookupGeneric>(C::gt_sel)
1131 .add<lookup_addressing_relative_overflow_result_5_settings, InteractionType::LookupGeneric>(C::gt_sel)
1133 // Internal Call Stack
1134 .add<perm_internal_call_push_call_stack_settings_, InteractionType::Permutation>()
1136 // Gas
1137 .add<lookup_gas_addressing_gas_read_settings, InteractionType::LookupIntoIndexedByClk>()
1139 .add<lookup_gas_is_out_of_gas_da_settings, InteractionType::LookupGeneric>(C::gt_sel)
1141 // Gas - ToRadix BE
1142 .add<lookup_execution_check_radix_gt_256_settings, InteractionType::LookupGeneric>(C::gt_sel)
1144 .add<lookup_execution_get_max_limbs_settings, InteractionType::LookupGeneric>(C::gt_sel)
1145 // Dynamic Gas - SStore
1147 // Context Stack
1148 .add<perm_context_ctx_stack_call_settings, InteractionType::Permutation>()
1150 .add<lookup_context_ctx_stack_return_settings, InteractionType::LookupGeneric>()
1151 // External Call
1153 .add<lookup_external_call_is_da_gas_left_gt_allocated_settings, InteractionType::LookupGeneric>(C::gt_sel)
1154 // GetEnvVar opcode
1156 .add<lookup_get_env_var_read_from_public_inputs_col0_settings, InteractionType::LookupIntoIndexedByClk>()
1158 // Sload opcode (cannot be sequential as public data tree check trace is sorted in tracegen)
1159 .add<lookup_sload_storage_read_settings, InteractionType::LookupGeneric>()
1160 // Sstore opcode
1162 // NoteHashExists
1163 .add<lookup_notehash_exists_note_hash_read_settings, InteractionType::LookupSequential>()
1165 // NullifierExists opcode
1166 .add<lookup_nullifier_exists_nullifier_exists_check_settings, InteractionType::LookupSequential>()
1167 // EmitNullifier
1169 // EmitNoteHash
1170 .add<lookup_emit_notehash_notehash_tree_write_settings, InteractionType::LookupSequential>()
1171 // L1ToL2MsgExists
1173 C::gt_sel)
1174 .add<lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_settings, InteractionType::LookupSequential>()
1175 // SendL2ToL1Msg
1177 // Dispatching to other sub-traces
1178 .add<lookup_execution_dispatch_to_alu_settings, InteractionType::LookupGeneric>()
1180 .add<perm_execution_dispatch_to_cd_copy_settings, InteractionType::Permutation>()
1182 .add<lookup_execution_dispatch_to_cast_settings, InteractionType::LookupGeneric>()
1184 .add<perm_execution_dispatch_to_get_contract_instance_settings, InteractionType::Permutation>()
1186 .add<perm_execution_dispatch_to_poseidon2_perm_settings, InteractionType::Permutation>()
1188 .add<perm_execution_dispatch_to_keccakf1600_settings, InteractionType::Permutation>()
1190 .add<perm_execution_dispatch_to_to_radix_settings, InteractionType::Permutation>();
1191
1192} // namespace bb::avm2::tracegen
#define BB_ASSERT(expression,...)
Definition assert.hpp:80
#define BB_ASSERT_EQ(actual, expected,...)
Definition assert.hpp:93
#define BB_ASSERT_LTE(left, right,...)
Definition assert.hpp:168
#define MEM_TAG_U32
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX
#define AVM_MAX_OPERANDS
#define NOTE_HASH_TREE_LEAF_COUNT
#define L1_TO_L2_MSG_TREE_LEAF_COUNT
#define AVM_MAX_REGISTERS
#define MAX_L2_TO_L1_MSGS_PER_TX
#define MAX_NOTE_HASHES_PER_TX
#define MAX_NULLIFIERS_PER_TX
#define AVM_HIGHEST_MEM_ADDRESS
#define MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX
static TaggedValue from_tag(ValueTag tag, FF value)
ValueTag get_tag() const
void process_execution_spec(const simulation::ExecutionEvent &ex_event, TraceContainer &trace, uint32_t row)
void process_instr_fetching(const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static const InteractionDefinition interactions
void process_registers(ExecutionOpCode exec_opcode, const std::vector< MemoryValue > &inputs, const MemoryValue &output, std::span< MemoryValue > registers, bool register_processing_failed, TraceContainer &trace, uint32_t row)
void process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output, TraceContainer &trace, uint32_t row)
void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process_gas(const simulation::GasEvent &gas_event, ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process(const simulation::EventEmitterInterface< simulation::ExecutionEvent >::Container &ex_events, TraceContainer &trace)
void process_addressing(const simulation::AddressingEvent &addr_event, const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static Table get_table(uint8_t envvar)
InteractionDefinition & add(auto &&... args)
void info(Args... args)
Definition log.hpp:89
TestTraceContainer trace
bool app_logic_failure
uint32_t app_logic_exit_context_id
bool teardown_failure
unordered_flat_set< uint32_t > does_context_fail
uint32_t teardown_exit_context_id
GasEvent gas_event
Instruction instruction
AvmProvingInputs inputs
Column get_dyn_gas_selector(uint32_t dyn_gas_id)
Get the column selector for a given dynamic gas ID.
const std::unordered_map< ExecutionOpCode, SubtraceInfo > & get_subtrace_info_map()
Column get_subtrace_selector(SubtraceSel subtrace_sel)
Get the column selector for a given subtrace selector.
FF get_subtrace_id(SubtraceSel subtrace_sel)
Get the subtrace ID for a given subtrace enum.
lookup_settings< lookup_get_env_var_read_from_public_inputs_col1_settings_ > lookup_get_env_var_read_from_public_inputs_col1_settings
lookup_settings< lookup_execution_check_written_storage_slot_settings_ > lookup_execution_check_written_storage_slot_settings
lookup_settings< lookup_addressing_relative_overflow_result_2_settings_ > lookup_addressing_relative_overflow_result_2_settings
permutation_settings< perm_execution_dispatch_to_emit_unencrypted_log_settings_ > perm_execution_dispatch_to_emit_unencrypted_log_settings
lookup_settings< lookup_addressing_relative_overflow_result_4_settings_ > lookup_addressing_relative_overflow_result_4_settings
lookup_settings< lookup_execution_dyn_l2_factor_bitwise_settings_ > lookup_execution_dyn_l2_factor_bitwise_settings
lookup_settings< lookup_external_call_is_l2_gas_left_gt_allocated_settings_ > lookup_external_call_is_l2_gas_left_gt_allocated_settings
bool is_operand_relative(uint16_t indirect_flag, size_t operand_index)
Checks if the operand at the given index is relative.
lookup_settings< lookup_emit_nullifier_write_nullifier_settings_ > lookup_emit_nullifier_write_nullifier_settings
size_t get_p_limbs_per_radix_size(size_t radix)
Definition to_radix.cpp:54
lookup_settings< lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings_ > lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings
permutation_settings< perm_execution_dispatch_to_sha256_compression_settings_ > perm_execution_dispatch_to_sha256_compression_settings
lookup_settings< lookup_gas_is_out_of_gas_l2_settings_ > lookup_gas_is_out_of_gas_l2_settings
lookup_settings< lookup_execution_dispatch_to_set_settings_ > lookup_execution_dispatch_to_set_settings
lookup_settings< lookup_context_ctx_stack_rollback_settings_ > lookup_context_ctx_stack_rollback_settings
bool is_operand_indirect(uint16_t indirect_flag, size_t operand_index)
Checks if the operand at the given index is indirect.
lookup_settings< lookup_execution_dispatch_to_bitwise_settings_ > lookup_execution_dispatch_to_bitwise_settings
lookup_settings< lookup_execution_get_p_limbs_settings_ > lookup_execution_get_p_limbs_settings
const std::unordered_map< ExecutionOpCode, ExecInstructionSpec > & get_exec_instruction_spec()
lookup_settings< lookup_execution_exec_spec_read_settings_ > lookup_execution_exec_spec_read_settings
lookup_settings< lookup_get_env_var_precomputed_info_settings_ > lookup_get_env_var_precomputed_info_settings
lookup_settings< lookup_addressing_relative_overflow_result_0_settings_ > lookup_addressing_relative_overflow_result_0_settings
lookup_settings< lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings_ > lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings
permutation_settings< perm_execution_dispatch_to_ecc_add_settings_ > perm_execution_dispatch_to_ecc_add_settings
lookup_settings< lookup_addressing_relative_overflow_result_6_settings_ > lookup_addressing_relative_overflow_result_6_settings
lookup_settings< lookup_execution_instruction_fetching_result_settings_ > lookup_execution_instruction_fetching_result_settings
lookup_settings< lookup_notehash_exists_note_hash_leaf_index_in_range_settings_ > lookup_notehash_exists_note_hash_leaf_index_in_range_settings
lookup_settings< lookup_sstore_record_written_storage_slot_settings_ > lookup_sstore_record_written_storage_slot_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
permutation_settings< perm_execution_dispatch_to_rd_copy_settings_ > perm_execution_dispatch_to_rd_copy_settings
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::vector< OperandResolutionInfo > resolution_info
ExecutionOpCode get_exec_opcode() const