for BytePackingSt
// Each byte index must be boolean.
for i in 0..NUM_BYTES {
- let idx_i = local_values[index_bytes(i)];
+ let idx_i = local_values[index_len(i)];
yield_constr.constraint(idx_i * (idx_i - one));
}
- // The sequence start flag column must start by one.
- let current_sequence_start = local_values[index_bytes(0)];
- yield_constr.constraint_first_row(current_sequence_start - one);
-
- // The sequence end flag must be boolean
- let current_sequence_end = local_values[SEQUENCE_END];
- yield_constr.constraint(current_sequence_end * (current_sequence_end - one));
-
- // If filter is off, all flags and byte indices must be off.
- let byte_indices = local_values[BYTE_INDICES_COLS].iter().copied().sum::();
- yield_constr.constraint(
- (current_filter - one) * (current_is_read + current_sequence_end + byte_indices),
- );
-
// Only padding rows have their filter turned off.
- let next_filter = next_values[BYTE_INDICES_COLS].iter().copied().sum::
();
+ let next_filter = next_values[LEN_INDICES_COLS].iter().copied().sum::
();
yield_constr.constraint_transition(next_filter * (next_filter - current_filter));
- // Unless the current sequence end flag is activated, the is_read filter must remain unchanged.
- let next_is_read = next_values[IS_READ];
- yield_constr
- .constraint_transition((current_sequence_end - one) * (next_is_read - current_is_read));
-
- // If the sequence end flag is activated, the next row must be a new sequence or filter must be off.
- let next_sequence_start = next_values[index_bytes(0)];
- yield_constr.constraint_transition(
- current_sequence_end * next_filter * (next_sequence_start - one),
- );
-
- // The active position in a byte sequence must increase by one on every row
- // or be one on the next row (i.e. at the start of a new sequence).
- let current_position = self.get_active_position(local_values);
- let next_position = self.get_active_position(next_values);
- yield_constr.constraint_transition(
- next_filter * (next_position - one) * (next_position - current_position - one),
- );
-
- // The last row must be the end of a sequence or a padding row.
- yield_constr.constraint_last_row(current_filter * (current_sequence_end - one));
-
- // If the next position is one in an active row, the current end flag must be one.
- yield_constr
- .constraint_transition(next_filter * current_sequence_end * (next_position - one));
-
- // The context, segment and timestamp fields must remain unchanged throughout a byte sequence.
- // The virtual address must decrement by one at each step of a sequence.
- let current_context = local_values[ADDR_CONTEXT];
- let next_context = next_values[ADDR_CONTEXT];
- let current_segment = local_values[ADDR_SEGMENT];
- let next_segment = next_values[ADDR_SEGMENT];
- let current_virtual = local_values[ADDR_VIRTUAL];
- let next_virtual = next_values[ADDR_VIRTUAL];
- let current_timestamp = local_values[TIMESTAMP];
- let next_timestamp = next_values[TIMESTAMP];
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (next_context - current_context),
- );
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (next_segment - current_segment),
- );
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (next_timestamp - current_timestamp),
- );
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (current_virtual - next_virtual - one),
- );
-
- // If not at the end of a sequence, each next byte must equal the current one
- // when reading through the sequence, or the next byte index must be one.
- for i in 0..NUM_BYTES {
- let current_byte = local_values[value_bytes(i)];
- let next_byte = next_values[value_bytes(i)];
- let next_byte_index = next_values[index_bytes(i)];
- yield_constr.constraint_transition(
- (current_sequence_end - one) * (next_byte_index - one) * (next_byte - current_byte),
- );
+ // Check that all limbs after final length are 0.
+ for i in 0..NUM_BYTES - 1 {
+ // If the length is i+1, then value_bytes(i+1),...,value_bytes(NUM_BYTES-1) must be 0.
+ for j in i + 1..NUM_BYTES {
+ yield_constr.constraint(local_values[index_len(i)] * local_values[value_bytes(j)]);
+ }
}
}
@@ -422,9 +331,23 @@ impl, const D: usize> Stark for BytePackingSt
let next_values: &[ExtensionTarget; NUM_COLUMNS] =
vars.get_next_values().try_into().unwrap();
+ // Check the range column: First value must be 0, last row
+ // must be 255, and intermediate rows must increment by 0
+ // or 1.
+ let rc1 = local_values[RANGE_COUNTER];
+ let rc2 = next_values[RANGE_COUNTER];
+ yield_constr.constraint_first_row(builder, rc1);
+ let incr = builder.sub_extension(rc2, rc1);
+ let t = builder.mul_sub_extension(incr, incr, incr);
+ yield_constr.constraint_transition(builder, t);
+ let range_max =
+ builder.constant_extension(F::Extension::from_canonical_usize(BYTE_RANGE_MAX - 1));
+ let t = builder.sub_extension(rc1, range_max);
+ yield_constr.constraint_last_row(builder, t);
+
// We filter active columns by summing all the byte indices.
// Constraining each of them to be boolean is done later on below.
- let current_filter = builder.add_many_extension(&local_values[BYTE_INDICES_COLS]);
+ let current_filter = builder.add_many_extension(&local_values[LEN_INDICES_COLS]);
let constraint = builder.mul_sub_extension(current_filter, current_filter, current_filter);
yield_constr.constraint(builder, constraint);
@@ -440,119 +363,25 @@ impl, const D: usize> Stark for BytePackingSt
// Each byte index must be boolean.
for i in 0..NUM_BYTES {
- let idx_i = local_values[index_bytes(i)];
+ let idx_i = local_values[index_len(i)];
let constraint = builder.mul_sub_extension(idx_i, idx_i, idx_i);
yield_constr.constraint(builder, constraint);
}
- // The sequence start flag column must start by one.
- let current_sequence_start = local_values[index_bytes(0)];
- let constraint = builder.add_const_extension(current_sequence_start, F::NEG_ONE);
- yield_constr.constraint_first_row(builder, constraint);
-
- // The sequence end flag must be boolean
- let current_sequence_end = local_values[SEQUENCE_END];
- let constraint = builder.mul_sub_extension(
- current_sequence_end,
- current_sequence_end,
- current_sequence_end,
- );
- yield_constr.constraint(builder, constraint);
-
- // If filter is off, all flags and byte indices must be off.
- let byte_indices = builder.add_many_extension(&local_values[BYTE_INDICES_COLS]);
- let constraint = builder.add_extension(current_sequence_end, byte_indices);
- let constraint = builder.add_extension(constraint, current_is_read);
- let constraint = builder.mul_sub_extension(constraint, current_filter, constraint);
- yield_constr.constraint(builder, constraint);
-
// Only padding rows have their filter turned off.
- let next_filter = builder.add_many_extension(&next_values[BYTE_INDICES_COLS]);
+ let next_filter = builder.add_many_extension(&next_values[LEN_INDICES_COLS]);
let constraint = builder.sub_extension(next_filter, current_filter);
let constraint = builder.mul_extension(next_filter, constraint);
yield_constr.constraint_transition(builder, constraint);
- // Unless the current sequence end flag is activated, the is_read filter must remain unchanged.
- let next_is_read = next_values[IS_READ];
- let diff_is_read = builder.sub_extension(next_is_read, current_is_read);
- let constraint =
- builder.mul_sub_extension(diff_is_read, current_sequence_end, diff_is_read);
- yield_constr.constraint_transition(builder, constraint);
-
- // If the sequence end flag is activated, the next row must be a new sequence or filter must be off.
- let next_sequence_start = next_values[index_bytes(0)];
- let constraint = builder.mul_sub_extension(
- current_sequence_end,
- next_sequence_start,
- current_sequence_end,
- );
- let constraint = builder.mul_extension(next_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
-
- // The active position in a byte sequence must increase by one on every row
- // or be one on the next row (i.e. at the start of a new sequence).
- let current_position = self.get_active_position_circuit(builder, local_values);
- let next_position = self.get_active_position_circuit(builder, next_values);
-
- let position_diff = builder.sub_extension(next_position, current_position);
- let is_new_or_inactive = builder.mul_sub_extension(next_filter, next_position, next_filter);
- let constraint =
- builder.mul_sub_extension(is_new_or_inactive, position_diff, is_new_or_inactive);
- yield_constr.constraint_transition(builder, constraint);
-
- // The last row must be the end of a sequence or a padding row.
- let constraint =
- builder.mul_sub_extension(current_filter, current_sequence_end, current_filter);
- yield_constr.constraint_last_row(builder, constraint);
-
- // If the next position is one in an active row, the current end flag must be one.
- let constraint = builder.mul_extension(next_filter, current_sequence_end);
- let constraint = builder.mul_sub_extension(constraint, next_position, constraint);
- yield_constr.constraint_transition(builder, constraint);
-
- // The context, segment and timestamp fields must remain unchanged throughout a byte sequence.
- // The virtual address must decrement by one at each step of a sequence.
- let current_context = local_values[ADDR_CONTEXT];
- let next_context = next_values[ADDR_CONTEXT];
- let current_segment = local_values[ADDR_SEGMENT];
- let next_segment = next_values[ADDR_SEGMENT];
- let current_virtual = local_values[ADDR_VIRTUAL];
- let next_virtual = next_values[ADDR_VIRTUAL];
- let current_timestamp = local_values[TIMESTAMP];
- let next_timestamp = next_values[TIMESTAMP];
- let addr_filter = builder.mul_sub_extension(next_filter, next_sequence_start, next_filter);
- {
- let constraint = builder.sub_extension(next_context, current_context);
- let constraint = builder.mul_extension(addr_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
- }
- {
- let constraint = builder.sub_extension(next_segment, current_segment);
- let constraint = builder.mul_extension(addr_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
- }
- {
- let constraint = builder.sub_extension(next_timestamp, current_timestamp);
- let constraint = builder.mul_extension(addr_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
- }
- {
- let constraint = builder.sub_extension(current_virtual, next_virtual);
- let constraint = builder.mul_sub_extension(addr_filter, constraint, addr_filter);
- yield_constr.constraint_transition(builder, constraint);
- }
-
- // If not at the end of a sequence, each next byte must equal the current one
- // when reading through the sequence, or the next byte index must be one.
- for i in 0..NUM_BYTES {
- let current_byte = local_values[value_bytes(i)];
- let next_byte = next_values[value_bytes(i)];
- let next_byte_index = next_values[index_bytes(i)];
- let byte_diff = builder.sub_extension(next_byte, current_byte);
- let constraint = builder.mul_sub_extension(byte_diff, next_byte_index, byte_diff);
- let constraint =
- builder.mul_sub_extension(constraint, current_sequence_end, constraint);
- yield_constr.constraint_transition(builder, constraint);
+ // Check that all limbs after final length are 0.
+ for i in 0..NUM_BYTES - 1 {
+ // If the length is i+1, then value_bytes(i+1),...,value_bytes(NUM_BYTES-1) must be 0.
+ for j in i + 1..NUM_BYTES {
+ let constr =
+ builder.mul_extension(local_values[index_len(i)], local_values[value_bytes(j)]);
+ yield_constr.constraint(builder, constr);
+ }
}
}
@@ -560,11 +389,12 @@ impl, const D: usize> Stark for BytePackingSt
3
}
- fn lookups(&self) -> Vec {
+ fn lookups(&self) -> Vec> {
vec![Lookup {
- columns: (value_bytes(0)..value_bytes(0) + NUM_BYTES).collect(),
- table_column: RANGE_COUNTER,
- frequencies_column: RC_FREQUENCIES,
+ columns: Column::singles(value_bytes(0)..value_bytes(0) + NUM_BYTES).collect(),
+ table_column: Column::single(RANGE_COUNTER),
+ frequencies_column: Column::single(RC_FREQUENCIES),
+ filter_columns: vec![None; NUM_BYTES],
}]
}
}
diff --git a/evm/src/byte_packing/columns.rs b/evm/src/byte_packing/columns.rs
index d29a1b6f51..cbed53de1d 100644
--- a/evm/src/byte_packing/columns.rs
+++ b/evm/src/byte_packing/columns.rs
@@ -6,24 +6,20 @@ use crate::byte_packing::NUM_BYTES;
/// 1 if this is a READ operation, and 0 if this is a WRITE operation.
pub(crate) const IS_READ: usize = 0;
-/// 1 if this is the end of a sequence of bytes.
-/// This is also used as filter for the CTL.
-pub(crate) const SEQUENCE_END: usize = IS_READ + 1;
-
-pub(super) const BYTES_INDICES_START: usize = SEQUENCE_END + 1;
-// There are `NUM_BYTES` columns used to represent the index of the active byte
-// for a given row of a byte (un)packing operation.
-pub(crate) const fn index_bytes(i: usize) -> usize {
+
+pub(super) const LEN_INDICES_START: usize = IS_READ + 1;
+// There are `NUM_BYTES` columns used to represent the length of
+// the input byte sequence for a (un)packing operation.
+// index_len(i) is 1 iff the length is i+1.
+pub(crate) const fn index_len(i: usize) -> usize {
debug_assert!(i < NUM_BYTES);
- BYTES_INDICES_START + i
+ LEN_INDICES_START + i
}
-// Note: Those are used as filter for distinguishing active vs padding rows,
-// and also to obtain the length of a sequence of bytes being processed.
-pub(crate) const BYTE_INDICES_COLS: Range =
- BYTES_INDICES_START..BYTES_INDICES_START + NUM_BYTES;
+// Note: Those are used to obtain the length of a sequence of bytes being processed.
+pub(crate) const LEN_INDICES_COLS: Range = LEN_INDICES_START..LEN_INDICES_START + NUM_BYTES;
-pub(crate) const ADDR_CONTEXT: usize = BYTES_INDICES_START + NUM_BYTES;
+pub(crate) const ADDR_CONTEXT: usize = LEN_INDICES_START + NUM_BYTES;
pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
pub(crate) const TIMESTAMP: usize = ADDR_VIRTUAL + 1;
@@ -32,7 +28,6 @@ pub(crate) const TIMESTAMP: usize = ADDR_VIRTUAL + 1;
const BYTES_VALUES_START: usize = TIMESTAMP + 1;
// There are `NUM_BYTES` columns used to store the values of the bytes
// that are being read/written for an (un)packing operation.
-// If `index_bytes(i) == 1`, then all `value_bytes(j) for j <= i` may be non-zero.
pub(crate) const fn value_bytes(i: usize) -> usize {
debug_assert!(i < NUM_BYTES);
BYTES_VALUES_START + i
diff --git a/evm/src/config.rs b/evm/src/config.rs
index a593c827c2..3f88d99f5d 100644
--- a/evm/src/config.rs
+++ b/evm/src/config.rs
@@ -1,20 +1,29 @@
use plonky2::fri::reduction_strategies::FriReductionStrategy;
use plonky2::fri::{FriConfig, FriParams};
+/// A configuration containing the different parameters to be used by the STARK prover.
pub struct StarkConfig {
+ /// The targeted security level for the proofs generated with this configuration.
pub security_bits: usize,
/// The number of challenge points to generate, for IOPs that have soundness errors of (roughly)
/// `degree / |F|`.
pub num_challenges: usize,
+ /// The configuration of the FRI sub-protocol.
pub fri_config: FriConfig,
}
+impl Default for StarkConfig {
+ fn default() -> Self {
+ Self::standard_fast_config()
+ }
+}
+
impl StarkConfig {
/// A typical configuration with a rate of 2, resulting in fast but large proofs.
/// Targets ~100 bit conjectured security.
- pub fn standard_fast_config() -> Self {
+ pub const fn standard_fast_config() -> Self {
Self {
security_bits: 100,
num_challenges: 2,
diff --git a/evm/src/constraint_consumer.rs b/evm/src/constraint_consumer.rs
index 49dc018ce3..919b51638a 100644
--- a/evm/src/constraint_consumer.rs
+++ b/evm/src/constraint_consumer.rs
@@ -1,4 +1,4 @@
-use std::marker::PhantomData;
+use core::marker::PhantomData;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
@@ -29,7 +29,7 @@ pub struct ConstraintConsumer {
}
impl ConstraintConsumer {
- pub fn new(
+ pub(crate) fn new(
alphas: Vec,
z_last: P,
lagrange_basis_first: P,
@@ -44,17 +44,17 @@ impl ConstraintConsumer {
}
}
- pub fn accumulators(self) -> Vec
{
+ pub(crate) fn accumulators(self) -> Vec
{
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
- pub fn constraint_transition(&mut self, constraint: P) {
+ pub(crate) fn constraint_transition(&mut self, constraint: P) {
self.constraint(constraint * self.z_last);
}
/// Add one constraint on all rows.
- pub fn constraint(&mut self, constraint: P) {
+ pub(crate) fn constraint(&mut self, constraint: P) {
for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) {
*acc *= alpha;
*acc += constraint;
@@ -63,13 +63,13 @@ impl ConstraintConsumer {
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// first row of the trace.
- pub fn constraint_first_row(&mut self, constraint: P) {
+ pub(crate) fn constraint_first_row(&mut self, constraint: P) {
self.constraint(constraint * self.lagrange_basis_first);
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// last row of the trace.
- pub fn constraint_last_row(&mut self, constraint: P) {
+ pub(crate) fn constraint_last_row(&mut self, constraint: P) {
self.constraint(constraint * self.lagrange_basis_last);
}
}
@@ -96,7 +96,7 @@ pub struct RecursiveConstraintConsumer, const D: us
}
impl, const D: usize> RecursiveConstraintConsumer {
- pub fn new(
+ pub(crate) fn new(
zero: ExtensionTarget,
alphas: Vec,
z_last: ExtensionTarget,
@@ -113,12 +113,12 @@ impl, const D: usize> RecursiveConstraintConsumer Vec> {
+ pub(crate) fn accumulators(self) -> Vec> {
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
- pub fn constraint_transition(
+ pub(crate) fn constraint_transition(
&mut self,
builder: &mut CircuitBuilder,
constraint: ExtensionTarget,
@@ -128,7 +128,7 @@ impl, const D: usize> RecursiveConstraintConsumer,
constraint: ExtensionTarget,
@@ -140,7 +140,7 @@ impl, const D: usize> RecursiveConstraintConsumer,
constraint: ExtensionTarget,
@@ -151,7 +151,7 @@ impl, const D: usize> RecursiveConstraintConsumer,
constraint: ExtensionTarget,
diff --git a/evm/src/cpu/bootstrap_kernel.rs b/evm/src/cpu/bootstrap_kernel.rs
deleted file mode 100644
index b04ff379dd..0000000000
--- a/evm/src/cpu/bootstrap_kernel.rs
+++ /dev/null
@@ -1,173 +0,0 @@
-//! The initial phase of execution, where the kernel code is hashed while being written to memory.
-//! The hash is then checked against a precomputed kernel hash.
-
-use ethereum_types::U256;
-use itertools::Itertools;
-use plonky2::field::extension::Extendable;
-use plonky2::field::packed::PackedField;
-use plonky2::field::types::Field;
-use plonky2::hash::hash_types::RichField;
-use plonky2::iop::ext_target::ExtensionTarget;
-use plonky2::plonk::circuit_builder::CircuitBuilder;
-
-use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
-use crate::cpu::columns::CpuColumnsView;
-use crate::cpu::kernel::aggregator::KERNEL;
-use crate::cpu::membus::NUM_GP_CHANNELS;
-use crate::generation::state::GenerationState;
-use crate::memory::segments::Segment;
-use crate::witness::memory::MemoryAddress;
-use crate::witness::util::{keccak_sponge_log, mem_write_gp_log_and_fill};
-
-/// Generates the rows to bootstrap the kernel.
-pub(crate) fn generate_bootstrap_kernel(state: &mut GenerationState) {
- // Iterate through chunks of the code, such that we can write one chunk to memory per row.
- for chunk in &KERNEL.code.iter().enumerate().chunks(NUM_GP_CHANNELS) {
- let mut cpu_row = CpuColumnsView::default();
- cpu_row.clock = F::from_canonical_usize(state.traces.clock());
- cpu_row.is_bootstrap_kernel = F::ONE;
-
- // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
- for (channel, (addr, &byte)) in chunk.enumerate() {
- let address = MemoryAddress::new(0, Segment::Code, addr);
- let write =
- mem_write_gp_log_and_fill(channel, address, state, &mut cpu_row, byte.into());
- state.traces.push_memory(write);
- }
-
- state.traces.push_cpu(cpu_row);
- }
-
- let mut final_cpu_row = CpuColumnsView::default();
- final_cpu_row.clock = F::from_canonical_usize(state.traces.clock());
- final_cpu_row.is_bootstrap_kernel = F::ONE;
- final_cpu_row.is_keccak_sponge = F::ONE;
- // The Keccak sponge CTL uses memory value columns for its inputs and outputs.
- final_cpu_row.mem_channels[0].value[0] = F::ZERO; // context
- final_cpu_row.mem_channels[1].value[0] = F::from_canonical_usize(Segment::Code as usize); // segment
- final_cpu_row.mem_channels[2].value[0] = F::ZERO; // virt
- final_cpu_row.mem_channels[3].value[0] = F::from_canonical_usize(KERNEL.code.len()); // len
- final_cpu_row.mem_channels[4].value = KERNEL.code_hash.map(F::from_canonical_u32);
- final_cpu_row.mem_channels[4].value.reverse();
- keccak_sponge_log(
- state,
- MemoryAddress::new(0, Segment::Code, 0),
- KERNEL.code.clone(),
- );
- state.registers.stack_top = KERNEL
- .code_hash
- .iter()
- .enumerate()
- .fold(0.into(), |acc, (i, &elt)| {
- acc + (U256::from(elt) << (224 - 32 * i))
- });
- state.traces.push_cpu(final_cpu_row);
- log::info!("Bootstrapping took {} cycles", state.traces.clock());
-}
-
-/// Evaluates the constraints for kernel bootstrapping.
-pub(crate) fn eval_bootstrap_kernel_packed>(
- local_values: &CpuColumnsView,
- next_values: &CpuColumnsView
,
- yield_constr: &mut ConstraintConsumer
,
-) {
- // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
- let local_is_bootstrap = local_values.is_bootstrap_kernel;
- let next_is_bootstrap = next_values.is_bootstrap_kernel;
- yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
- yield_constr.constraint_last_row(local_is_bootstrap);
- let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
- yield_constr.constraint_transition(delta_is_bootstrap * (delta_is_bootstrap + P::ONES));
-
- // If this is a bootloading row and the i'th memory channel is used, it must have the right
- // address, name context = 0, segment = Code, virt = clock * NUM_GP_CHANNELS + i.
- let code_segment = F::from_canonical_usize(Segment::Code as usize);
- for (i, channel) in local_values.mem_channels.iter().enumerate() {
- let filter = local_is_bootstrap * channel.used;
- yield_constr.constraint(filter * channel.addr_context);
- yield_constr.constraint(filter * (channel.addr_segment - code_segment));
- let expected_virt = local_values.clock * F::from_canonical_usize(NUM_GP_CHANNELS)
- + F::from_canonical_usize(i);
- yield_constr.constraint(filter * (channel.addr_virtual - expected_virt));
- }
-
- // If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
- // - all memory channels are disabled
- // - the current kernel hash matches a precomputed one
- for channel in local_values.mem_channels.iter() {
- yield_constr.constraint_transition(delta_is_bootstrap * channel.used);
- }
- for (&expected, actual) in KERNEL
- .code_hash
- .iter()
- .rev()
- .zip(local_values.mem_channels.last().unwrap().value)
- {
- let expected = P::from(F::from_canonical_u32(expected));
- let diff = expected - actual;
- yield_constr.constraint_transition(delta_is_bootstrap * diff);
- }
-}
-
-/// Circuit version of `eval_bootstrap_kernel_packed`.
-/// Evaluates the constraints for kernel bootstrapping.
-pub(crate) fn eval_bootstrap_kernel_ext_circuit, const D: usize>(
- builder: &mut CircuitBuilder,
- local_values: &CpuColumnsView>,
- next_values: &CpuColumnsView>,
- yield_constr: &mut RecursiveConstraintConsumer,
-) {
- let one = builder.one_extension();
-
- // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
- let local_is_bootstrap = local_values.is_bootstrap_kernel;
- let next_is_bootstrap = next_values.is_bootstrap_kernel;
- let constraint = builder.sub_extension(local_is_bootstrap, one);
- yield_constr.constraint_first_row(builder, constraint);
- yield_constr.constraint_last_row(builder, local_is_bootstrap);
- let delta_is_bootstrap = builder.sub_extension(next_is_bootstrap, local_is_bootstrap);
- let constraint =
- builder.mul_add_extension(delta_is_bootstrap, delta_is_bootstrap, delta_is_bootstrap);
- yield_constr.constraint_transition(builder, constraint);
-
- // If this is a bootloading row and the i'th memory channel is used, it must have the right
- // address, name context = 0, segment = Code, virt = clock * NUM_GP_CHANNELS + i.
- let code_segment =
- builder.constant_extension(F::Extension::from_canonical_usize(Segment::Code as usize));
- for (i, channel) in local_values.mem_channels.iter().enumerate() {
- let filter = builder.mul_extension(local_is_bootstrap, channel.used);
- let constraint = builder.mul_extension(filter, channel.addr_context);
- yield_constr.constraint(builder, constraint);
-
- let segment_diff = builder.sub_extension(channel.addr_segment, code_segment);
- let constraint = builder.mul_extension(filter, segment_diff);
- yield_constr.constraint(builder, constraint);
-
- let i_ext = builder.constant_extension(F::Extension::from_canonical_usize(i));
- let num_gp_channels_f = F::from_canonical_usize(NUM_GP_CHANNELS);
- let expected_virt =
- builder.mul_const_add_extension(num_gp_channels_f, local_values.clock, i_ext);
- let virt_diff = builder.sub_extension(channel.addr_virtual, expected_virt);
- let constraint = builder.mul_extension(filter, virt_diff);
- yield_constr.constraint(builder, constraint);
- }
-
- // If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
- // - all memory channels are disabled
- // - the current kernel hash matches a precomputed one
- for channel in local_values.mem_channels.iter() {
- let constraint = builder.mul_extension(delta_is_bootstrap, channel.used);
- yield_constr.constraint_transition(builder, constraint);
- }
- for (&expected, actual) in KERNEL
- .code_hash
- .iter()
- .rev()
- .zip(local_values.mem_channels.last().unwrap().value)
- {
- let expected = builder.constant_extension(F::Extension::from_canonical_u32(expected));
- let diff = builder.sub_extension(expected, actual);
- let constraint = builder.mul_extension(delta_is_bootstrap, diff);
- yield_constr.constraint_transition(builder, constraint);
- }
-}
diff --git a/evm/src/cpu/byte_unpacking.rs b/evm/src/cpu/byte_unpacking.rs
new file mode 100644
index 0000000000..39053141d6
--- /dev/null
+++ b/evm/src/cpu/byte_unpacking.rs
@@ -0,0 +1,94 @@
+use plonky2::field::extension::Extendable;
+use plonky2::field::packed::PackedField;
+use plonky2::field::types::Field;
+use plonky2::hash::hash_types::RichField;
+use plonky2::iop::ext_target::ExtensionTarget;
+use plonky2::plonk::circuit_builder::CircuitBuilder;
+
+use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
+use crate::cpu::columns::CpuColumnsView;
+
+pub(crate) fn eval_packed(
+ lv: &CpuColumnsView,
+ nv: &CpuColumnsView
,
+ yield_constr: &mut ConstraintConsumer
,
+) {
+ // The MSTORE_32BYTES opcodes are differentiated from MLOAD_32BYTES
+ // by the 5th bit set to 0.
+ let filter = lv.op.m_op_32bytes * (lv.opcode_bits[5] - P::ONES);
+
+ // The address to write to is stored in the first memory channel.
+ // It contains virt, segment, ctx in its first 3 limbs, and 0 otherwise.
+ // The new address is identical, except for its `virtual` limb that is increased by the corresponding `len` offset.
+ let new_addr = nv.mem_channels[0].value;
+ let written_addr = lv.mem_channels[0].value;
+
+ // Read len from opcode bits and constrain the pushed new offset.
+ let len_bits: P = lv.opcode_bits[..5]
+ .iter()
+ .enumerate()
+ .map(|(i, &bit)| bit * P::Scalar::from_canonical_u64(1 << i))
+ .sum();
+ let len = len_bits + P::ONES;
+
+ // Check that `virt` is increased properly.
+ yield_constr.constraint(filter * (new_addr[0] - written_addr[0] - len));
+
+ // Check that `segment` and `ctx` do not change.
+ yield_constr.constraint(filter * (new_addr[1] - written_addr[1]));
+ yield_constr.constraint(filter * (new_addr[2] - written_addr[2]));
+
+ // Check that the rest of the returned address is null.
+ for &limb in &new_addr[3..] {
+ yield_constr.constraint(filter * limb);
+ }
+}
+
+pub(crate) fn eval_ext_circuit, const D: usize>(
+ builder: &mut CircuitBuilder,
+ lv: &CpuColumnsView>,
+ nv: &CpuColumnsView>,
+ yield_constr: &mut RecursiveConstraintConsumer,
+) {
+ // The MSTORE_32BYTES opcodes are differentiated from MLOAD_32BYTES
+ // by the 5th bit set to 0.
+ let filter =
+ builder.mul_sub_extension(lv.op.m_op_32bytes, lv.opcode_bits[5], lv.op.m_op_32bytes);
+
+ // The address to write to is stored in the first memory channel.
+ // It contains virt, segment, ctx in its first 3 limbs, and 0 otherwise.
+ // The new address is identical, except for its `virtual` limb that is increased by the corresponding `len` offset.
+ let new_addr = nv.mem_channels[0].value;
+ let written_addr = lv.mem_channels[0].value;
+
+ // Read len from opcode bits and constrain the pushed new offset.
+ let len_bits = lv.opcode_bits[..5].iter().enumerate().fold(
+ builder.zero_extension(),
+ |cumul, (i, &bit)| {
+ builder.mul_const_add_extension(F::from_canonical_u64(1 << i), bit, cumul)
+ },
+ );
+
+ // Check that `virt` is increased properly.
+ let diff = builder.sub_extension(new_addr[0], written_addr[0]);
+ let diff = builder.sub_extension(diff, len_bits);
+ let constr = builder.mul_sub_extension(filter, diff, filter);
+ yield_constr.constraint(builder, constr);
+
+ // Check that `segment` and `ctx` do not change.
+ {
+ let diff = builder.sub_extension(new_addr[1], written_addr[1]);
+ let constr = builder.mul_extension(filter, diff);
+ yield_constr.constraint(builder, constr);
+
+ let diff = builder.sub_extension(new_addr[2], written_addr[2]);
+ let constr = builder.mul_extension(filter, diff);
+ yield_constr.constraint(builder, constr);
+ }
+
+ // Check that the rest of the returned address is null.
+ for &limb in &new_addr[3..] {
+ let constr = builder.mul_extension(filter, limb);
+ yield_constr.constraint(builder, constr);
+ }
+}
diff --git a/evm/src/cpu/clock.rs b/evm/src/cpu/clock.rs
new file mode 100644
index 0000000000..cd7b17d8ed
--- /dev/null
+++ b/evm/src/cpu/clock.rs
@@ -0,0 +1,37 @@
+use plonky2::field::extension::Extendable;
+use plonky2::field::packed::PackedField;
+use plonky2::hash::hash_types::RichField;
+use plonky2::iop::ext_target::ExtensionTarget;
+
+use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
+use crate::cpu::columns::CpuColumnsView;
+
+/// Check the correct updating of `clock`.
+pub(crate) fn eval_packed(
+ lv: &CpuColumnsView,
+ nv: &CpuColumnsView
,
+ yield_constr: &mut ConstraintConsumer
,
+) {
+ // The clock is 0 at the beginning.
+ yield_constr.constraint_first_row(lv.clock);
+ // The clock is incremented by 1 at each row.
+ yield_constr.constraint_transition(nv.clock - lv.clock - P::ONES);
+}
+
+/// Circuit version of `eval_packed`.
+/// Check the correct updating of `clock`.
+pub(crate) fn eval_ext_circuit, const D: usize>(
+ builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder,
+ lv: &CpuColumnsView>,
+ nv: &CpuColumnsView>,
+ yield_constr: &mut RecursiveConstraintConsumer,
+) {
+ // The clock is 0 at the beginning.
+ yield_constr.constraint_first_row(builder, lv.clock);
+ // The clock is incremented by 1 at each row.
+ {
+ let new_clock = builder.add_const_extension(lv.clock, F::ONE);
+ let constr = builder.sub_extension(nv.clock, new_clock);
+ yield_constr.constraint_transition(builder, constr);
+ }
+}
diff --git a/evm/src/cpu/columns/general.rs b/evm/src/cpu/columns/general.rs
index cfc24f51c6..f565acc625 100644
--- a/evm/src/cpu/columns/general.rs
+++ b/evm/src/cpu/columns/general.rs
@@ -1,6 +1,6 @@
-use std::borrow::{Borrow, BorrowMut};
-use std::fmt::{Debug, Formatter};
-use std::mem::{size_of, transmute};
+use core::borrow::{Borrow, BorrowMut};
+use core::fmt::{Debug, Formatter};
+use core::mem::{size_of, transmute};
/// General purpose columns, which can have different meanings depending on what CTL or other
/// operation is occurring at this row.
@@ -76,6 +76,7 @@ impl CpuGeneralColumnsView {
}
impl<T: Copy + PartialEq> PartialEq for CpuGeneralColumnsView<T> {
+ #[allow(clippy::unconditional_recursion)] // false positive
fn eq(&self, other: &Self) -> bool {
let self_arr: &[T; NUM_SHARED_COLUMNS] = self.borrow();
let other_arr: &[T; NUM_SHARED_COLUMNS] = other.borrow();
@@ -135,20 +136,22 @@ pub(crate) struct CpuShiftView {
pub(crate) high_limb_sum_inv: T,
}
-/// View of the last three `CpuGeneralColumns` storing the stack length pseudoinverse `stack_inv`,
-/// stack_len * stack_inv and filter * stack_inv_aux when needed.
+/// View of the last four `CpuGeneralColumns` storing stack-related variables. The first three are used
+/// for conditionally enabling and disabling channels when reading the next `stack_top`, and the fourth one
+/// is used to check for stack overflow.
#[derive(Copy, Clone)]
pub(crate) struct CpuStackView<T: Copy> {
- // Used for conditionally enabling and disabling channels when reading the next `stack_top`.
- _unused: [T; 5],
- /// Pseudoinverse of the stack len.
+ _unused: [T; 4],
+ /// Pseudoinverse of `stack_len - num_pops`.
pub(crate) stack_inv: T,
/// stack_inv * stack_len.
pub(crate) stack_inv_aux: T,
- /// Holds filter * stack_inv_aux when necessary, to reduce the degree of stack constraints.
+ /// Used to reduce the degree of stack constraints when needed.
pub(crate) stack_inv_aux_2: T,
+ /// Pseudoinverse of `nv.stack_len - (MAX_USER_STACK_SIZE + 1)` to check for stack overflow.
+ pub(crate) stack_len_bounds_aux: T,
}
/// Number of columns shared by all the views of `CpuGeneralColumnsView`.
/// `u8` is guaranteed to have a `size_of` of 1.
-pub const NUM_SHARED_COLUMNS: usize = size_of::<CpuGeneralColumnsView<u8>>();
+pub(crate) const NUM_SHARED_COLUMNS: usize = size_of::<CpuGeneralColumnsView<u8>>();
diff --git a/evm/src/cpu/columns/mod.rs b/evm/src/cpu/columns/mod.rs
index f83147c2dc..92da4e9979 100644
--- a/evm/src/cpu/columns/mod.rs
+++ b/evm/src/cpu/columns/mod.rs
@@ -1,7 +1,7 @@
-use std::borrow::{Borrow, BorrowMut};
-use std::fmt::Debug;
-use std::mem::{size_of, transmute};
-use std::ops::{Index, IndexMut};
+use core::borrow::{Borrow, BorrowMut};
+use core::fmt::Debug;
+use core::mem::{size_of, transmute};
+use core::ops::{Index, IndexMut};
use plonky2::field::types::Field;
@@ -21,7 +21,7 @@ pub type MemValue = [T; memory::VALUE_LIMBS];
/// View of the columns required for one memory channel.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub struct MemoryChannelView<T: Copy> {
+pub(crate) struct MemoryChannelView<T: Copy> {
/// 1 if this row includes a memory operation in the `i`th channel of the memory bus, otherwise
/// 0.
pub used: T,
@@ -39,11 +39,20 @@ pub struct MemoryChannelView {
/// View of all the columns in `CpuStark`.
#[repr(C)]
-#[derive(Clone, Copy, Eq, PartialEq, Debug)]
-pub struct CpuColumnsView<T: Copy> {
- /// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
- pub is_bootstrap_kernel: T,
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+// A more lightweight channel, sharing values with the 0-th memory channel
+// (which contains the top of the stack).
+pub(crate) struct PartialMemoryChannelView<T: Copy> {
+ pub used: T,
+ pub is_read: T,
+ pub addr_context: T,
+ pub addr_segment: T,
+ pub addr_virtual: T,
+}
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+pub(crate) struct CpuColumnsView<T: Copy> {
/// If CPU cycle: Current context.
pub context: T,
@@ -56,15 +65,11 @@ pub struct CpuColumnsView {
/// If CPU cycle: The stack length.
pub stack_len: T,
- /// If CPU cycle: A prover-provided value needed to show that the instruction does not cause the
- /// stack to underflow or overflow.
- pub stack_len_bounds_aux: T,
-
/// If CPU cycle: We're in kernel (privileged) mode.
pub is_kernel_mode: T,
- /// If CPU cycle: Gas counter, split in two 32-bit limbs in little-endian order.
- pub gas: [T; 2],
+ /// If CPU cycle: Gas counter.
+ pub gas: T,
/// If CPU cycle: flags for EVM instructions (a few cannot be shared; see the comments in
/// `OpsColumnsView`).
@@ -73,22 +78,22 @@ pub struct CpuColumnsView {
/// If CPU cycle: the opcode, broken up into bits in little-endian order.
pub opcode_bits: [T; 8],
- /// Filter. 1 iff a Keccak sponge lookup is performed on this row.
- pub is_keccak_sponge: T,
-
/// Columns shared by various operations.
pub(crate) general: CpuGeneralColumnsView,
/// CPU clock.
pub(crate) clock: T,
- /// Memory bus channels in the CPU. Each channel is comprised of 13 columns.
+ /// Memory bus channels in the CPU.
+ /// Full channels are comprised of 13 columns.
pub mem_channels: [MemoryChannelView; NUM_GP_CHANNELS],
+ /// Partial channel is only comprised of 5 columns.
+ pub(crate) partial_channel: PartialMemoryChannelView,
}
/// Total number of columns in `CpuStark`.
/// `u8` is guaranteed to have a `size_of` of 1.
-pub const NUM_CPU_COLUMNS: usize = size_of::<CpuColumnsView<u8>>();
+pub(crate) const NUM_CPU_COLUMNS: usize = size_of::<CpuColumnsView<u8>>();
impl Default for CpuColumnsView {
fn default() -> Self {
@@ -160,4 +165,4 @@ const fn make_col_map() -> CpuColumnsView {
}
/// Mapping between [0..NUM_CPU_COLUMNS-1] and the CPU columns.
-pub const COL_MAP: CpuColumnsView<usize> = make_col_map();
+pub(crate) const COL_MAP: CpuColumnsView<usize> = make_col_map();
diff --git a/evm/src/cpu/columns/ops.rs b/evm/src/cpu/columns/ops.rs
index 282fa393e2..c15d657229 100644
--- a/evm/src/cpu/columns/ops.rs
+++ b/evm/src/cpu/columns/ops.rs
@@ -1,13 +1,13 @@
-use std::borrow::{Borrow, BorrowMut};
-use std::mem::{size_of, transmute};
-use std::ops::{Deref, DerefMut};
+use core::borrow::{Borrow, BorrowMut};
+use core::mem::{size_of, transmute};
+use core::ops::{Deref, DerefMut};
use crate::util::transmute_no_compile_time_size_checks;
/// Structure representing the flags for the various opcodes.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
-pub struct OpsColumnsView<T: Copy> {
+pub(crate) struct OpsColumnsView<T: Copy> {
/// Combines ADD, MUL, SUB, DIV, MOD, LT, GT and BYTE flags.
pub binary_op: T,
/// Combines ADDMOD, MULMOD and SUBMOD flags.
@@ -24,20 +24,16 @@ pub struct OpsColumnsView {
pub shift: T,
/// Combines JUMPDEST and KECCAK_GENERAL flags.
pub jumpdest_keccak_general: T,
- /// Flag for PROVER_INPUT.
- pub prover_input: T,
/// Combines JUMP and JUMPI flags.
pub jumps: T,
- /// Flag for PUSH.
- pub push: T,
+ /// Combines PUSH and PROVER_INPUT flags.
+ pub push_prover_input: T,
/// Combines DUP and SWAP flags.
pub dup_swap: T,
/// Combines GET_CONTEXT and SET_CONTEXT flags.
pub context_op: T,
- /// Flag for MSTORE_32BYTES.
- pub mstore_32bytes: T,
- /// Flag for MLOAD_32BYTES.
- pub mload_32bytes: T,
+ /// Combines MSTORE_32BYTES and MLOAD_32BYTES.
+ pub m_op_32bytes: T,
/// Flag for EXIT_KERNEL.
pub exit_kernel: T,
/// Combines MSTORE_GENERAL and MLOAD_GENERAL flags.
@@ -53,7 +49,7 @@ pub struct OpsColumnsView {
/// Number of columns in Cpu Stark.
/// `u8` is guaranteed to have a `size_of` of 1.
-pub const NUM_OPS_COLUMNS: usize = size_of::<OpsColumnsView<u8>>();
+pub(crate) const NUM_OPS_COLUMNS: usize = size_of::<OpsColumnsView<u8>>();
impl From<[T; NUM_OPS_COLUMNS]> for OpsColumnsView {
fn from(value: [T; NUM_OPS_COLUMNS]) -> Self {
diff --git a/evm/src/cpu/contextops.rs b/evm/src/cpu/contextops.rs
index 8e25d5a168..ec4e5e5e6e 100644
--- a/evm/src/cpu/contextops.rs
+++ b/evm/src/cpu/contextops.rs
@@ -7,10 +7,9 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use super::columns::ops::OpsColumnsView;
-use super::membus::NUM_GP_CHANNELS;
+use super::cpu_stark::{disable_unused_channels, disable_unused_channels_circuit};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
-use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::memory::segments::Segment;
// If true, the instruction will keep the current context for the next row.
@@ -24,14 +23,12 @@ const KEEPS_CONTEXT: OpsColumnsView = OpsColumnsView {
not_pop: true,
shift: true,
jumpdest_keccak_general: true,
- prover_input: true,
+ push_prover_input: true,
jumps: true,
pc_push0: true,
- push: true,
dup_swap: true,
context_op: false,
- mstore_32bytes: true,
- mload_32bytes: true,
+ m_op_32bytes: true,
exit_kernel: true,
m_op_general: true,
syscall: true,
@@ -85,8 +82,10 @@ fn eval_packed_get(
// If the opcode is GET_CONTEXT, then lv.opcode_bits[0] = 0.
let filter = lv.op.context_op * (P::ONES - lv.opcode_bits[0]);
let new_stack_top = nv.mem_channels[0].value;
- yield_constr.constraint(filter * (new_stack_top[0] - lv.context));
- for &limb in &new_stack_top[1..] {
+ // Context is scaled by 2^64, hence stored in the 3rd limb.
+ yield_constr.constraint(filter * (new_stack_top[2] - lv.context));
+
+ for (_, &limb) in new_stack_top.iter().enumerate().filter(|(i, _)| *i != 2) {
yield_constr.constraint(filter * limb);
}
@@ -94,17 +93,12 @@ fn eval_packed_get(
yield_constr.constraint(filter * (nv.stack_len - (lv.stack_len + P::ONES)));
// Unused channels.
- for i in 1..NUM_GP_CHANNELS {
- if i != 3 {
- let channel = lv.mem_channels[i];
- yield_constr.constraint(filter * channel.used);
- }
- }
+ disable_unused_channels(lv, filter, vec![1], yield_constr);
yield_constr.constraint(filter * nv.mem_channels[0].used);
}
/// Circuit version of `eval_packed_get`.
-/// Evalutes constraints for GET_CONTEXT.
+/// Evaluates constraints for GET_CONTEXT.
fn eval_ext_circuit_get, const D: usize>(
builder: &mut CircuitBuilder,
lv: &CpuColumnsView>,
@@ -115,12 +109,14 @@ fn eval_ext_circuit_get, const D: usize>(
let prod = builder.mul_extension(lv.op.context_op, lv.opcode_bits[0]);
let filter = builder.sub_extension(lv.op.context_op, prod);
let new_stack_top = nv.mem_channels[0].value;
+ // Context is scaled by 2^64, hence stored in the 3rd limb.
{
- let diff = builder.sub_extension(new_stack_top[0], lv.context);
+ let diff = builder.sub_extension(new_stack_top[2], lv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
- for &limb in &new_stack_top[1..] {
+
+ for (_, &limb) in new_stack_top.iter().enumerate().filter(|(i, _)| *i != 2) {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
@@ -134,13 +130,7 @@ fn eval_ext_circuit_get, const D: usize>(
}
// Unused channels.
- for i in 1..NUM_GP_CHANNELS {
- if i != 3 {
- let channel = lv.mem_channels[i];
- let constr = builder.mul_extension(filter, channel.used);
- yield_constr.constraint(builder, constr);
- }
- }
+ disable_unused_channels_circuit(builder, lv, filter, vec![1], yield_constr);
{
let constr = builder.mul_extension(filter, nv.mem_channels[0].used);
yield_constr.constraint(builder, constr);
@@ -155,33 +145,16 @@ fn eval_packed_set(
) {
let filter = lv.op.context_op * lv.opcode_bits[0];
let stack_top = lv.mem_channels[0].value;
- let write_old_sp_channel = lv.mem_channels[1];
- let read_new_sp_channel = lv.mem_channels[2];
- let ctx_metadata_segment = P::Scalar::from_canonical_u64(Segment::ContextMetadata as u64);
- let stack_size_field = P::Scalar::from_canonical_u64(ContextMetadata::StackSize as u64);
- let local_sp_dec = lv.stack_len - P::ONES;
// The next row's context is read from stack_top.
- yield_constr.constraint(filter * (stack_top[0] - nv.context));
-
- // The old SP is decremented (since the new context was popped) and written to memory.
- yield_constr.constraint(filter * (write_old_sp_channel.value[0] - local_sp_dec));
- for limb in &write_old_sp_channel.value[1..] {
- yield_constr.constraint(filter * *limb);
+ yield_constr.constraint(filter * (stack_top[2] - nv.context));
+ for (_, &limb) in stack_top.iter().enumerate().filter(|(i, _)| *i != 2) {
+ yield_constr.constraint(filter * limb);
}
- yield_constr.constraint(filter * (write_old_sp_channel.used - P::ONES));
- yield_constr.constraint(filter * write_old_sp_channel.is_read);
- yield_constr.constraint(filter * (write_old_sp_channel.addr_context - lv.context));
- yield_constr.constraint(filter * (write_old_sp_channel.addr_segment - ctx_metadata_segment));
- yield_constr.constraint(filter * (write_old_sp_channel.addr_virtual - stack_size_field));
+ // The old SP is decremented (since the new context was popped) and stored in memory.
// The new SP is loaded from memory.
- yield_constr.constraint(filter * (read_new_sp_channel.value[0] - nv.stack_len));
- yield_constr.constraint(filter * (read_new_sp_channel.used - P::ONES));
- yield_constr.constraint(filter * (read_new_sp_channel.is_read - P::ONES));
- yield_constr.constraint(filter * (read_new_sp_channel.addr_context - nv.context));
- yield_constr.constraint(filter * (read_new_sp_channel.addr_segment - ctx_metadata_segment));
- yield_constr.constraint(filter * (read_new_sp_channel.addr_virtual - stack_size_field));
+ // This is all done with CTLs: nothing is constrained here.
// Constrain stack_inv_aux_2.
let new_top_channel = nv.mem_channels[0];
@@ -190,21 +163,19 @@ fn eval_packed_set(
* (lv.general.stack().stack_inv_aux * lv.opcode_bits[0]
- lv.general.stack().stack_inv_aux_2),
);
- // The new top is loaded in memory channel 3, if the stack isn't empty (see eval_packed).
- yield_constr.constraint(
- lv.op.context_op
- * lv.general.stack().stack_inv_aux_2
- * (lv.mem_channels[3].value[0] - new_top_channel.value[0]),
- );
- for &limb in &new_top_channel.value[1..] {
- yield_constr.constraint(lv.op.context_op * lv.general.stack().stack_inv_aux_2 * limb);
+ // The new top is loaded in memory channel 2, if the stack isn't empty (see eval_packed).
+ for (&limb_new_top, &limb_read_top) in new_top_channel
+ .value
+ .iter()
+ .zip(lv.mem_channels[2].value.iter())
+ {
+ yield_constr.constraint(
+ lv.op.context_op * lv.general.stack().stack_inv_aux_2 * (limb_new_top - limb_read_top),
+ );
}
// Unused channels.
- for i in 4..NUM_GP_CHANNELS {
- let channel = lv.mem_channels[i];
- yield_constr.constraint(filter * channel.used);
- }
+ disable_unused_channels(lv, filter, vec![1], yield_constr);
yield_constr.constraint(filter * new_top_channel.used);
}
@@ -218,87 +189,21 @@ fn eval_ext_circuit_set, const D: usize>(
) {
let filter = builder.mul_extension(lv.op.context_op, lv.opcode_bits[0]);
let stack_top = lv.mem_channels[0].value;
- let write_old_sp_channel = lv.mem_channels[1];
- let read_new_sp_channel = lv.mem_channels[2];
- let ctx_metadata_segment = builder.constant_extension(F::Extension::from_canonical_u32(
- Segment::ContextMetadata as u32,
- ));
- let stack_size_field = builder.constant_extension(F::Extension::from_canonical_u32(
- ContextMetadata::StackSize as u32,
- ));
- let one = builder.one_extension();
- let local_sp_dec = builder.sub_extension(lv.stack_len, one);
// The next row's context is read from stack_top.
{
- let diff = builder.sub_extension(stack_top[0], nv.context);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
-
- // The old SP is decremented (since the new context was popped) and written to memory.
- {
- let diff = builder.sub_extension(write_old_sp_channel.value[0], local_sp_dec);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
- for limb in &write_old_sp_channel.value[1..] {
- let constr = builder.mul_extension(filter, *limb);
- yield_constr.constraint(builder, constr);
- }
- {
- let constr = builder.mul_sub_extension(filter, write_old_sp_channel.used, filter);
- yield_constr.constraint(builder, constr);
- }
- {
- let constr = builder.mul_extension(filter, write_old_sp_channel.is_read);
- yield_constr.constraint(builder, constr);
- }
- {
- let diff = builder.sub_extension(write_old_sp_channel.addr_context, lv.context);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
- {
- let diff = builder.sub_extension(write_old_sp_channel.addr_segment, ctx_metadata_segment);
+ let diff = builder.sub_extension(stack_top[2], nv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
- {
- let diff = builder.sub_extension(write_old_sp_channel.addr_virtual, stack_size_field);
- let constr = builder.mul_extension(filter, diff);
+ for (_, &limb) in stack_top.iter().enumerate().filter(|(i, _)| *i != 2) {
+ let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
+ // The old SP is decremented (since the new context was popped) and stored in memory.
// The new SP is loaded from memory.
- {
- let diff = builder.sub_extension(read_new_sp_channel.value[0], nv.stack_len);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
- {
- let constr = builder.mul_sub_extension(filter, read_new_sp_channel.used, filter);
- yield_constr.constraint(builder, constr);
- }
- {
- let constr = builder.mul_sub_extension(filter, read_new_sp_channel.is_read, filter);
- yield_constr.constraint(builder, constr);
- }
- {
- let diff = builder.sub_extension(read_new_sp_channel.addr_context, nv.context);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
- {
- let diff = builder.sub_extension(read_new_sp_channel.addr_segment, ctx_metadata_segment);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
- {
- let diff = builder.sub_extension(read_new_sp_channel.addr_virtual, stack_size_field);
- let constr = builder.mul_extension(filter, diff);
- yield_constr.constraint(builder, constr);
- }
+ // This is all done with CTLs: nothing is constrained here.
// Constrain stack_inv_aux_2.
let new_top_channel = nv.mem_channels[0];
@@ -311,25 +216,20 @@ fn eval_ext_circuit_set, const D: usize>(
let constr = builder.mul_extension(lv.op.context_op, diff);
yield_constr.constraint(builder, constr);
}
- // The new top is loaded in memory channel 3, if the stack isn't empty (see eval_packed).
+ // The new top is loaded in memory channel 2, if the stack isn't empty (see eval_packed).
+ for (&limb_new_top, &limb_read_top) in new_top_channel
+ .value
+ .iter()
+ .zip(lv.mem_channels[2].value.iter())
{
- let diff = builder.sub_extension(lv.mem_channels[3].value[0], new_top_channel.value[0]);
+ let diff = builder.sub_extension(limb_new_top, limb_read_top);
let prod = builder.mul_extension(lv.general.stack().stack_inv_aux_2, diff);
let constr = builder.mul_extension(lv.op.context_op, prod);
yield_constr.constraint(builder, constr);
}
- for &limb in &new_top_channel.value[1..] {
- let prod = builder.mul_extension(lv.general.stack().stack_inv_aux_2, limb);
- let constr = builder.mul_extension(lv.op.context_op, prod);
- yield_constr.constraint(builder, constr);
- }
// Unused channels.
- for i in 4..NUM_GP_CHANNELS {
- let channel = lv.mem_channels[i];
- let constr = builder.mul_extension(filter, channel.used);
- yield_constr.constraint(builder, constr);
- }
+ disable_unused_channels_circuit(builder, lv, filter, vec![1], yield_constr);
{
let constr = builder.mul_extension(filter, new_top_channel.used);
yield_constr.constraint(builder, constr);
@@ -337,7 +237,7 @@ fn eval_ext_circuit_set, const D: usize>(
}
/// Evaluates the constraints for the GET and SET opcodes.
-pub fn eval_packed<P: PackedField>(
+pub(crate) fn eval_packed<P: PackedField>(
     lv: &CpuColumnsView<P>,
     nv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
@@ -347,10 +247,10 @@ pub fn eval_packed(
eval_packed_set(lv, nv, yield_constr);
// Stack constraints.
- // Both operations use memory channel 3. The operations are similar enough that
+ // Both operations use memory channel 2. The operations are similar enough that
// we can constrain both at the same time.
let filter = lv.op.context_op;
- let channel = lv.mem_channels[3];
+ let channel = lv.mem_channels[2];
// For get_context, we check if lv.stack_len is 0. For set_context, we check if nv.stack_len is 0.
// However, for get_context, we can deduce lv.stack_len from nv.stack_len since the operation only pushes.
let stack_len = nv.stack_len - (P::ONES - lv.opcode_bits[0]);
@@ -367,7 +267,8 @@ pub fn eval_packed(
yield_constr.constraint(new_filter * (channel.addr_context - nv.context));
// Same segment for both.
yield_constr.constraint(
- new_filter * (channel.addr_segment - P::Scalar::from_canonical_u64(Segment::Stack as u64)),
+ new_filter
+ * (channel.addr_segment - P::Scalar::from_canonical_usize(Segment::Stack.unscale())),
);
// The address is one less than stack_len.
let addr_virtual = stack_len - P::ONES;
@@ -376,7 +277,7 @@ pub fn eval_packed(
/// Circuit version of `eval_packed`.
/// Evaluates the constraints for the GET and SET opcodes.
-pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     builder: &mut CircuitBuilder<F, D>,
     lv: &CpuColumnsView<ExtensionTarget<D>>,
     nv: &CpuColumnsView<ExtensionTarget<D>>,
@@ -387,10 +288,10 @@ pub fn eval_ext_circuit, const D: usize>(
eval_ext_circuit_set(builder, lv, nv, yield_constr);
// Stack constraints.
- // Both operations use memory channel 3. The operations are similar enough that
+ // Both operations use memory channel 2. The operations are similar enough that
// we can constrain both at the same time.
let filter = lv.op.context_op;
- let channel = lv.mem_channels[3];
+ let channel = lv.mem_channels[2];
// For get_context, we check if lv.stack_len is 0. For set_context, we check if nv.stack_len is 0.
// However, for get_context, we can deduce lv.stack_len from nv.stack_len since the operation only pushes.
let diff = builder.add_const_extension(lv.opcode_bits[0], -F::ONE);
@@ -428,7 +329,7 @@ pub fn eval_ext_circuit, const D: usize>(
{
let diff = builder.add_const_extension(
channel.addr_segment,
- -F::from_canonical_u64(Segment::Stack as u64),
+ -F::from_canonical_usize(Segment::Stack.unscale()),
);
let constr = builder.mul_extension(new_filter, diff);
yield_constr.constraint(builder, constr);
diff --git a/evm/src/cpu/control_flow.rs b/evm/src/cpu/control_flow.rs
index adaee51123..bde5930572 100644
--- a/evm/src/cpu/control_flow.rs
+++ b/evm/src/cpu/control_flow.rs
@@ -8,7 +8,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
use crate::cpu::columns::{CpuColumnsView, COL_MAP};
use crate::cpu::kernel::aggregator::KERNEL;
-const NATIVE_INSTRUCTIONS: [usize; 13] = [
+const NATIVE_INSTRUCTIONS: [usize; 12] = [
COL_MAP.op.binary_op,
COL_MAP.op.ternary_op,
COL_MAP.op.fp254_op,
@@ -17,7 +17,7 @@ const NATIVE_INSTRUCTIONS: [usize; 13] = [
COL_MAP.op.not_pop,
COL_MAP.op.shift,
COL_MAP.op.jumpdest_keccak_general,
- COL_MAP.op.prover_input,
+ // Not PROVER_INPUT: it is dealt with manually below.
// not JUMPS (possible need to jump)
COL_MAP.op.pc_push0,
// not PUSH (need to increment by more than 1)
@@ -43,7 +43,7 @@ pub(crate) fn get_start_pc() -> F {
}
/// Evaluates the constraints related to the flow of instructions.
-pub fn eval_packed_generic<P: PackedField>(
+pub(crate) fn eval_packed_generic<P: PackedField>(
     lv: &CpuColumnsView<P>,
     nv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
@@ -51,7 +51,7 @@ pub fn eval_packed_generic(
let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
- let next_halt_state = P::ONES - nv.is_bootstrap_kernel - is_cpu_cycle_next;
+ let next_halt_state = P::ONES - is_cpu_cycle_next;
// Once we start executing instructions, then we continue until the end of the table
// or we reach dummy padding rows. This, along with the constraints on the first row,
@@ -70,6 +70,13 @@ pub fn eval_packed_generic(
yield_constr
.constraint_transition(is_native_instruction * (lv.is_kernel_mode - nv.is_kernel_mode));
+ // Apply the same checks as before, for PROVER_INPUT.
+ let is_prover_input: P = lv.op.push_prover_input * (lv.opcode_bits[5] - P::ONES);
+ yield_constr.constraint_transition(
+ is_prover_input * (lv.program_counter - nv.program_counter + P::ONES),
+ );
+ yield_constr.constraint_transition(is_prover_input * (lv.is_kernel_mode - nv.is_kernel_mode));
+
// If a non-CPU cycle row is followed by a CPU cycle row, then:
// - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel),
// - execution is in kernel mode, and
@@ -83,7 +90,7 @@ pub fn eval_packed_generic(
/// Circuit version of `eval_packed`.
/// Evaluates the constraints related to the flow of instructions.
-pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView