-
Notifications
You must be signed in to change notification settings - Fork 193
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
use number of partitions for buffer allocation instead of partition size #339
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -5,6 +5,7 @@ | |
|
||
use alloc::vec::Vec; | ||
|
||
use air::PartitionOptions; | ||
use crypto::{ElementHasher, VectorCommitment}; | ||
use math::{fft, FieldElement, StarkField}; | ||
#[cfg(feature = "concurrent")] | ||
|
@@ -180,14 +181,17 @@ impl<E: FieldElement> RowMatrix<E> { | |
/// * A vector commitment is computed for the resulting vector using the specified vector | ||
/// commitment scheme. | ||
/// * The resulting vector commitment is returned as the commitment to the entire matrix. | ||
pub fn commit_to_rows<H, V>(&self, partition_size: usize) -> V | ||
pub fn commit_to_rows<H, V>(&self, partition_options: PartitionOptions) -> V | ||
where | ||
H: ElementHasher<BaseField = E::BaseField>, | ||
V: VectorCommitment<H>, | ||
{ | ||
// allocate vector to store row hashes | ||
let mut row_hashes = unsafe { uninit_vector::<H::Digest>(self.num_rows()) }; | ||
|
||
let partition_size = partition_options.partition_size::<E>(self.num_cols()); | ||
let num_partitions = partition_options.num_partitons(); | ||
Comment on lines
+192
to
+193
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Question: should this be the "specified" number of partitions or the "implied" number of partitions? For example, let's say we have 7 columns in the degree 2 extension field and the specified number of partitions is 4 with min partition size being 8. With these parameters, the implied number of partitions is actually 2 (because partition size would be 4 columns and there are 7 columns total). So, would we want to use 2 or 4 for the number of partitions? |
||
|
||
if partition_size == self.num_cols() * E::EXTENSION_DEGREE { | ||
// iterate though matrix rows, hashing each row | ||
batch_iter_mut!( | ||
|
@@ -205,7 +209,7 @@ impl<E: FieldElement> RowMatrix<E> { | |
&mut row_hashes, | ||
128, // min batch size | ||
|batch: &mut [H::Digest], batch_offset: usize| { | ||
let mut buffer = vec![H::Digest::default(); partition_size]; | ||
let mut buffer = vec![H::Digest::default(); num_partitions]; | ||
for (i, row_hash) in batch.iter_mut().enumerate() { | ||
self.row(batch_offset + i) | ||
.chunks(partition_size) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -43,7 +43,7 @@ pub struct DefaultTraceLde< | |
aux_segment_oracles: Option<V>, | ||
blowup: usize, | ||
trace_info: TraceInfo, | ||
partition_option: PartitionOptions, | ||
partition_options: PartitionOptions, | ||
_h: PhantomData<H>, | ||
} | ||
|
||
|
@@ -64,14 +64,14 @@ where | |
trace_info: &TraceInfo, | ||
main_trace: &ColMatrix<E::BaseField>, | ||
domain: &StarkDomain<E::BaseField>, | ||
partition_option: PartitionOptions, | ||
partition_options: PartitionOptions, | ||
) -> (Self, TracePolyTable<E>) { | ||
// extend the main execution trace and build a commitment to the extended trace | ||
let (main_segment_lde, main_segment_vector_com, main_segment_polys) = | ||
build_trace_commitment::<E, E::BaseField, H, V>( | ||
main_trace, | ||
domain, | ||
partition_option.partition_size::<E::BaseField>(main_trace.num_cols()), | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Compilation error. Needs to be |
||
partition_options.partition_size::<E::BaseField>(main_trace.num_cols()), | ||
); | ||
|
||
let trace_poly_table = TracePolyTable::new(main_segment_polys); | ||
|
@@ -82,7 +82,7 @@ where | |
aux_segment_oracles: None, | ||
blowup: domain.trace_to_lde_blowup(), | ||
trace_info: trace_info.clone(), | ||
partition_option, | ||
partition_options, | ||
_h: PhantomData, | ||
}; | ||
|
||
|
@@ -148,11 +148,7 @@ where | |
) -> (ColMatrix<E>, H::Digest) { | ||
// extend the auxiliary trace segment and build a commitment to the extended trace | ||
let (aux_segment_lde, aux_segment_oracles, aux_segment_polys) = | ||
build_trace_commitment::<E, E, H, Self::VC>( | ||
aux_trace, | ||
domain, | ||
self.partition_option.partition_size::<E>(aux_trace.num_cols()), | ||
); | ||
build_trace_commitment::<E, E, H, Self::VC>(aux_trace, domain, self.partition_options); | ||
|
||
// check errors | ||
assert!( | ||
|
@@ -276,7 +272,7 @@ where | |
fn build_trace_commitment<E, F, H, V>( | ||
trace: &ColMatrix<F>, | ||
domain: &StarkDomain<E::BaseField>, | ||
partition_size: usize, | ||
partition_options: PartitionOptions, | ||
) -> (RowMatrix<F>, V, ColMatrix<F>) | ||
where | ||
E: FieldElement, | ||
|
@@ -306,7 +302,7 @@ where | |
// build trace commitment | ||
let commitment_domain_size = trace_lde.num_rows(); | ||
let trace_vector_com = info_span!("compute_execution_trace_commitment", commitment_domain_size) | ||
.in_scope(|| trace_lde.commit_to_rows::<H, V>(partition_size)); | ||
.in_scope(|| trace_lde.commit_to_rows::<H, V>(partition_options)); | ||
assert_eq!(trace_vector_com.domain_len(), commitment_domain_size); | ||
|
||
(trace_lde, trace_vector_com, trace_polys) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We may need to make this a bit more sophisticated because I think it will produce rather suboptimal results in some situations. For example:
num_columns = 7
E::EXTENSION_DEGREE = 3
self.num_partitions = 4
self.min_partition_size = 8
If I did my math correctly, with this approach, we'd get partition size of 3 columns which would imply 9 base field columns per partition. This would require 2 permutations of the hash function in each partition.
The previous approach would have actually resulted in a better outcome here (i.e., partition size 2, so the 4 partitions would have 2, 2, 2, 1 columns). But this result would have been technically incorrect because we'd have 6 base field elements per partition and this would be smaller than
min_partition_size
. Maybe instead of
min_partition_size
we should be specifying the number of base elements that can be absorbed per permutation, and then we can adjust this algorithm to output more optimal results.