diff --git a/ecc/bls12-377/fr/fri/fri_test.go b/ecc/bls12-377/fr/fri/fri_test.go index f5cc9e4760..0da4f02a7e 100644 --- a/ecc/bls12-377/fr/fri/fri_test.go +++ b/ecc/bls12-377/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bls12-377/fr/mimc/doc.go b/ecc/bls12-377/fr/mimc/doc.go index d527ead9ef..78837e1c80 100644 --- a/ecc/bls12-377/fr/mimc/doc.go +++ b/ecc/bls12-377/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. 
Due to this interpretation, the input byte slice length must +// be a multiple of the field modulus size. And every sequence of bytes for a +// single field element must be strictly less than the field modulus. package mimc diff --git a/ecc/bls12-377/fr/pedersen/pedersen.go b/ecc/bls12-377/fr/pedersen/pedersen.go index b09acbd223..1bbf3a0a24 100644 --- a/ecc/bls12-377/fr/pedersen/pedersen.go +++ b/ecc/bls12-377/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) 
(commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), 
len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bls12-377/fr/pedersen/pedersen_test.go b/ecc/bls12-377/fr/pedersen/pedersen_test.go index 75c11fdae0..28c98cfc30 100644 --- a/ecc/bls12-377/fr/pedersen/pedersen_test.go +++ b/ecc/bls12-377/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bls12-377" "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bls12-377/fr/sis/sis.go 
b/ecc/bls12-377/fr/sis/sis.go index 1279c8145b..954867a93f 100644 --- a/ecc/bls12-377/fr/sis/sis.go +++ b/ecc/bls12-377/fr/sis/sis.go @@ -52,7 +52,7 @@ type RSis struct { // domain for the polynomial multiplication Domain *fft.Domain - twiddleCosets []fr.Element // see fft64 and precomputeTwiddlesCoset + twiddleCosets []fr.Element // see FFT64 and PrecomputeTwiddlesCoset // d, the degree of X^{d}+1 Degree int @@ -129,7 +129,7 @@ func NewRSis(seed int64, logTwoDegree, logTwoBound, maxNbElementsToHash int) (*R } if r.LogTwoBound == 8 && r.Degree == 64 { // TODO @gbotrel fixme, that's dirty. - r.twiddleCosets = precomputeTwiddlesCoset(r.Domain.Generator, r.Domain.FrMultiplicativeGen) + r.twiddleCosets = PrecomputeTwiddlesCoset(r.Domain.Generator, r.Domain.FrMultiplicativeGen) } // filling A @@ -199,7 +199,7 @@ func (r *RSis) Sum(b []byte) []byte { k := m[i*r.Degree : (i+1)*r.Degree] if fastPath { // fast path. - fft64(k, r.twiddleCosets) + FFT64(k, r.twiddleCosets) } else { r.Domain.FFT(k, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) } @@ -337,7 +337,7 @@ func LimbDecomposeBytes(buf []byte, m fr.Vector, logTwoBound int) { // big-endian form into an array of limbs representing the same field elements // in little-endian form. Namely, if our field is represented with 64 bits and we // have the following field element 0x0123456789abcdef (0 being the most significant -character and and f being the least significant one) and our log norm bound is +character and f being the least significant one) and our norm bound is 16 (so 1 hex character = 1 limb). The function assigns the values of m to [f, e, // d, c, b, a, ..., 3, 2, 1, 0]. m should be preallocated and zeroized. mValues is // an optional bitSet. If provided, it must be empty. The function will set bit "i" @@ -374,7 +374,7 @@ func limbDecomposeBytes(buf []byte, m fr.Vector, logTwoBound, degree int, mValue // and set the bits from LSB to MSB. 
at := fieldStart + fr.Bytes*8 - bitInField - 1 - m[mPos][0] |= uint64(bitAt(at) << j) + m[mPos][0] |= uint64(bitAt(at)) << j bitInField++ // Check if mPos is zero and mark as non-zero in the bitset if not diff --git a/ecc/bls12-377/fr/sis/sis_fft.go b/ecc/bls12-377/fr/sis/sis_fft.go index ae351d5ec1..46e583004d 100644 --- a/ecc/bls12-377/fr/sis/sis_fft.go +++ b/ecc/bls12-377/fr/sis/sis_fft.go @@ -21,10 +21,10 @@ import ( "math/big" ) -// fft64 is generated by gnark-crypto and contains the unrolled code for FFT (DIF) on 64 elements +// FFT64 is generated by gnark-crypto and contains the unrolled code for FFT (DIF) on 64 elements // equivalent code: r.Domain.FFT(k, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) -// twiddlesCoset must be pre-computed from twiddles and coset table, see precomputeTwiddlesCoset -func fft64(a []fr.Element, twiddlesCoset []fr.Element) { +// twiddlesCoset must be pre-computed from twiddles and coset table, see PrecomputeTwiddlesCoset +func FFT64(a []fr.Element, twiddlesCoset []fr.Element) { a[32].Mul(&a[32], &twiddlesCoset[0]) a[33].Mul(&a[33], &twiddlesCoset[0]) @@ -412,9 +412,9 @@ func fft64(a []fr.Element, twiddlesCoset []fr.Element) { fr.Butterfly(&a[62], &a[63]) } -// precomputeTwiddlesCoset precomputes twiddlesCoset from twiddles and coset table +// PrecomputeTwiddlesCoset precomputes twiddlesCoset from twiddles and coset table // it then return all elements in the correct order for the unrolled FFT. 
-func precomputeTwiddlesCoset(generator, shifter fr.Element) []fr.Element { +func PrecomputeTwiddlesCoset(generator, shifter fr.Element) []fr.Element { toReturn := make([]fr.Element, 63) var r, s fr.Element e := new(big.Int) diff --git a/ecc/bls12-377/fr/sis/sis_test.go b/ecc/bls12-377/fr/sis/sis_test.go index 3df4ff66a0..9243aede27 100644 --- a/ecc/bls12-377/fr/sis/sis_test.go +++ b/ecc/bls12-377/fr/sis/sis_test.go @@ -172,62 +172,97 @@ func TestLimbDecomposition(t *testing.T) { t.Skip("skipping this test in 32bit.") } - sis, _ := NewRSis(0, 4, 4, 3) - - testcases := []fr.Vector{ - {fr.One()}, - {fr.NewElement(2)}, - {fr.NewElement(1 << 32), fr.NewElement(2), fr.NewElement(1)}, + testcases := []struct { + logTwoDegree, logTwoBound int + vec fr.Vector + }{ + { + logTwoDegree: 4, + logTwoBound: 4, + vec: fr.Vector{fr.One()}, + }, + { + logTwoDegree: 4, + logTwoBound: 4, + vec: fr.Vector{fr.NewElement(2)}, + }, + { + logTwoDegree: 4, + logTwoBound: 4, + vec: fr.Vector{fr.NewElement(1 << 32), fr.NewElement(2), fr.NewElement(1)}, + }, + { + logTwoDegree: 4, + logTwoBound: 16, + vec: fr.Vector{fr.One()}, + }, + { + logTwoDegree: 4, + logTwoBound: 16, + vec: fr.Vector{fr.NewElement(2)}, + }, + { + logTwoDegree: 4, + logTwoBound: 16, + vec: fr.Vector{fr.NewElement(1 << 32), fr.NewElement(2), fr.NewElement(1)}, + }, } - for _, testcase := range testcases { + for i, testcase := range testcases { - // clean the sis hasher - sis.bufMValues.ClearAll() - for i := 0; i < len(sis.bufM); i++ { - sis.bufM[i].SetZero() - } - for i := 0; i < len(sis.bufRes); i++ { - sis.bufRes[i].SetZero() - } + t.Run(fmt.Sprintf("testcase-%v", i), func(t *testing.T) { - buf := bytes.Buffer{} - for _, x := range testcase { - xBytes := x.Bytes() - buf.Write(xBytes[:]) - } - limbDecomposeBytes(buf.Bytes(), sis.bufM, sis.LogTwoBound, sis.Degree, sis.bufMValues) - - // Just to test, this does not return panic - dummyBuffer := make(fr.Vector, 192) - LimbDecomposeBytes(buf.Bytes(), dummyBuffer, 
sis.LogTwoBound) - - // b is a field element representing the max norm bound - // used for limb splitting the input field elements. - b := fr.NewElement(1 << sis.LogTwoBound) - numLimbsPerField := fr.Bytes * 8 / sis.LogTwoBound - - // Compute r (corresponds to the Montgommery constant) - var r fr.Element - r.SetString("6014086494747379908336260804527802945383293308637734276299549080986809532403") - - // Attempt to recompose the entry #i in the test-case - for i := range testcase { - // allegedly corresponds to the limbs of the entry i - subRes := sis.bufM[i*numLimbsPerField : (i+1)*numLimbsPerField] - - // performs a Horner evaluation of subres by b - var y fr.Element - for j := numLimbsPerField - 1; j >= 0; j-- { - y.Mul(&y, &b) - y.Add(&y, &subRes[j]) + t.Logf("testcase %v", testcase) + + sis, _ := NewRSis(0, testcase.logTwoDegree, testcase.logTwoBound, 3) + + // clean the sis hasher + sis.bufMValues.ClearAll() + for i := 0; i < len(sis.bufM); i++ { + sis.bufM[i].SetZero() + } + for i := 0; i < len(sis.bufRes); i++ { + sis.bufRes[i].SetZero() } - fmt.Printf("subres: %v\n", subRes) + buf := bytes.Buffer{} + for _, x := range testcase.vec { + xBytes := x.Bytes() + buf.Write(xBytes[:]) + } + + limbDecomposeBytes(buf.Bytes(), sis.bufM, sis.LogTwoBound, sis.Degree, sis.bufMValues) + + // Just to test, this does not return panic + dummyBuffer := make(fr.Vector, 192) + LimbDecomposeBytes(buf.Bytes(), dummyBuffer, sis.LogTwoBound) + + // b is a field element representing the max norm bound + // used for limb splitting the input field elements. 
+ b := fr.NewElement(1 << sis.LogTwoBound) + numLimbsPerField := fr.Bytes * 8 / sis.LogTwoBound + + // Compute r (corresponds to the Montgommery constant) + var r fr.Element + r.SetString("6014086494747379908336260804527802945383293308637734276299549080986809532403") + + // Attempt to recompose the entry #i in the test-case + for i := range testcase.vec { + // allegedly corresponds to the limbs of the entry i + subRes := sis.bufM[i*numLimbsPerField : (i+1)*numLimbsPerField] + + // performs a Horner evaluation of subres by b + var y fr.Element + for j := numLimbsPerField - 1; j >= 0; j-- { + y.Mul(&y, &b) + y.Add(&y, &subRes[j]) + } + + y.Mul(&y, &r) + require.Equal(t, testcase.vec[i].String(), y.String(), "the subRes was %v", subRes) + } + }) - y.Mul(&y, &r) - require.Equal(t, testcase[i].String(), y.String(), "the subRes was %v", subRes) - } } } @@ -427,8 +462,8 @@ func TestUnrolledFFT(t *testing.T) { domain.FFT(k1, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) // unrolled FFT - twiddlesCoset := precomputeTwiddlesCoset(domain.Generator, domain.FrMultiplicativeGen) - fft64(k2, twiddlesCoset) + twiddlesCoset := PrecomputeTwiddlesCoset(domain.Generator, domain.FrMultiplicativeGen) + FFT64(k2, twiddlesCoset) // compare results for i := 0; i < size; i++ { diff --git a/ecc/bls12-377/g1.go b/ecc/bls12-377/g1.go index 2dcabfc5c4..246583da63 100644 --- a/ecc/bls12-377/g1.go +++ b/ecc/bls12-377/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // 
ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point doubling +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -564,14 +616,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func 
(p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -635,6 +686,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git a/ecc/bls12-377/g1_test.go b/ecc/bls12-377/g1_test.go index 0ec94c0c80..c2558e677c 100644 --- a/ecc/bls12-377/g1_test.go +++ b/ecc/bls12-377/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS12-377-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS12-377] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS12-377] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-377] Add should call double when adding the same point", prop.ForAll( + func(s 
fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-377] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS12-377] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS12-377] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS12-377] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls12-377/g2.go b/ecc/bls12-377/g2.go index fc51704c5f..04a5031c0c 100644 --- a/ecc/bls12-377/g2.go +++ b/ecc/bls12-377/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fptower.E2 + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point doubling +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fptower.E2 + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bls12-377/g2_test.go b/ecc/bls12-377/g2_test.go index 3702b58a50..6e7876ee6a 100644 --- a/ecc/bls12-377/g2_test.go +++ b/ecc/bls12-377/g2_test.go @@ -248,7 +248,72 @@ func TestG2AffineOps(t 
*testing.T) { genScalar := GenFr() - properties.Property("[BLS12-377-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS12-377] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS12-377] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-377] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-377] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS12-377] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -279,7 +344,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS12-377] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS12-377] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fptower.E2) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -770,6 
+835,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fptower.E2) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls12-377/kzg/kzg.go b/ecc/bls12-377/kzg/kzg.go index 3458d655ef..592858d9e9 100644 --- a/ecc/bls12-377/kzg/kzg.go +++ b/ecc/bls12-377/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff bls12377.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bls12377.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bls12377.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bls12377.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bls12377.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bls12377.G1Jac + commitmentJac.FromAffine(commitment) + 
totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bls12377.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bls12377.PairingCheckFixedQ( - []bls12377.G1Affine{totalG1Aff, negH}, + []bls12377.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bls12-377/kzg/kzg_test.go b/ecc/bls12-377/kzg/kzg_test.go index c35b82a077..8110a9be4a 100644 --- a/ecc/bls12-377/kzg/kzg_test.go +++ b/ecc/bls12-377/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/gnark-crypto/ecc/bls12-377/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, 
err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. + var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) 
+ if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bls12377.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bls12-377/kzg/marshal.go b/ecc/bls12-377/kzg/marshal.go index 7640734b35..64e344ee4e 100644 --- a/ecc/bls12-377/kzg/marshal.go +++ b/ecc/bls12-377/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bls12-377" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bls12377.Encoder)) return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be used to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bls12377.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bls12377.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bls12-377/twistededwards/point.go b/ecc/bls12-377/twistededwards/point.go index 9359b12ffc..4bd6487dfc 100644 --- a/ecc/bls12-377/twistededwards/point.go +++ b/ecc/bls12-377/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bls12-378/fr/fri/fri_test.go b/ecc/bls12-378/fr/fri/fri_test.go index 60988ffa12..8e613e8105 100644 --- a/ecc/bls12-378/fr/fri/fri_test.go +++ b/ecc/bls12-378/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bls12-378/fr/mimc/doc.go b/ecc/bls12-378/fr/mimc/doc.go index 
d527ead9ef..78837e1c80 100644 --- a/ecc/bls12-378/fr/mimc/doc.go +++ b/ecc/bls12-378/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be a multiple of the field modulus size. And every sequence of byte slice for a +// single field element must be strictly less than the field modulus. 
package mimc diff --git a/ecc/bls12-378/fr/pedersen/pedersen.go b/ecc/bls12-378/fr/pedersen/pedersen.go index be32bdca46..80ffed40ba 100644 --- a/ecc/bls12-378/fr/pedersen/pedersen.go +++ b/ecc/bls12-378/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 
@@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bls12-378/fr/pedersen/pedersen_test.go 
b/ecc/bls12-378/fr/pedersen/pedersen_test.go index d94ac20ef6..92ea7d60db 100644 --- a/ecc/bls12-378/fr/pedersen/pedersen_test.go +++ b/ecc/bls12-378/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bls12-378" "github.com/consensys/gnark-crypto/ecc/bls12-378/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bls12-378/g1.go b/ecc/bls12-378/g1.go index 14b7788513..5ed4a983f7 100644 --- a/ecc/bls12-378/g1.go +++ b/ecc/bls12-378/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { 
return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point doubling +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -564,14 +616,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -635,6 +686,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git 
a/ecc/bls12-378/g1_test.go b/ecc/bls12-378/g1_test.go index ebd0bc648d..8fd0e260bc 100644 --- a/ecc/bls12-378/g1_test.go +++ b/ecc/bls12-378/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS12-378-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS12-378] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS12-378] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-378] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-378] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS12-378] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS12-378] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + 
properties.Property("[BLS12-378] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls12-378/g2.go b/ecc/bls12-378/g2.go index f5b92cf943..c076ae5f01 100644 --- a/ecc/bls12-378/g2.go +++ b/ecc/bls12-378/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fptower.E2 + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). 
+ Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point doubling +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fptower.E2 + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bls12-378/g2_test.go b/ecc/bls12-378/g2_test.go index be2ab68608..ac43de8601 100644 --- a/ecc/bls12-378/g2_test.go +++ b/ecc/bls12-378/g2_test.go @@ -248,7 +248,72 @@ func TestG2AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS12-378-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS12-378] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS12-378] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-378] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool 
{ + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-378] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS12-378] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -279,7 +344,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS12-378] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS12-378] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fptower.E2) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -770,6 +835,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fptower.E2) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls12-378/kzg/kzg.go b/ecc/bls12-378/kzg/kzg.go index 3624d41e07..1a2b7f26e7 100644 --- a/ecc/bls12-378/kzg/kzg.go +++ b/ecc/bls12-378/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var 
claimedValueG1Aff bls12378.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bls12378.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bls12378.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bls12378.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bls12378.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bls12378.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bls12378.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bls12378.PairingCheckFixedQ( - []bls12378.G1Affine{totalG1Aff, negH}, + []bls12378.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bls12-378/kzg/kzg_test.go b/ecc/bls12-378/kzg/kzg_test.go index 61b1ed5e0c..156a1730de 100644 --- a/ecc/bls12-378/kzg/kzg_test.go +++ b/ecc/bls12-378/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-378/fr" "github.com/consensys/gnark-crypto/ecc/bls12-378/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used 
across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. 
+ var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + 
if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bls12378.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bls12-378/kzg/marshal.go b/ecc/bls12-378/kzg/marshal.go index 9203b44fc3..f3349849c9 100644 --- a/ecc/bls12-378/kzg/marshal.go +++ b/ecc/bls12-378/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bls12-378" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bls12378.Encoder)) return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bls12378.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bls12378.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bls12-378/twistededwards/point.go b/ecc/bls12-378/twistededwards/point.go index c66be4acab..db5ed27dc4 100644 --- a/ecc/bls12-378/twistededwards/point.go +++ b/ecc/bls12-378/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bls12-381/bandersnatch/endomorpism.go b/ecc/bls12-381/bandersnatch/endomorpism.go index d68fdfc4e9..bc62712a45 100644 --- a/ecc/bls12-381/bandersnatch/endomorpism.go +++ b/ecc/bls12-381/bandersnatch/endomorpism.go @@ -1,7 +1,6 @@ package bandersnatch import ( - "math" "math/big" "github.com/consensys/gnark-crypto/ecc" @@ -30,14 +29,13 @@ func (p *PointProj) phi(p1 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication (GLV) of a point +// scalarMulGLV 
is the GLV scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int func (p *PointProj) scalarMulGLV(p1 *PointProj, scalar *big.Int) *PointProj { initOnce.Do(initCurveParams) var table [15]PointProj - var zero big.Int var res PointProj var k1, k2 fr.Element @@ -50,11 +48,11 @@ func (p *PointProj) scalarMulGLV(p1 *PointProj, scalar *big.Int) *PointProj { // split the scalar, modifies +-p1, phi(p1) accordingly k := ecc.SplitScalar(scalar, &curveParams.glvBasis) - if k[0].Cmp(&zero) == -1 { + if k[0].Sign() == -1 { k[0].Neg(&k[0]) table[0].Neg(&table[0]) } - if k[1].Cmp(&zero) == -1 { + if k[1].Sign() == -1 { k[1].Neg(&k[1]) table[3].Neg(&table[3]) } @@ -62,26 +60,33 @@ func (p *PointProj) scalarMulGLV(p1 *PointProj, scalar *big.Int) *PointProj { // precompute table (2 bits sliding window) // table[b3b2b1b0-1] = b3b2*phi(p1) + b1b0*p1 if b3b2b1b0 != 0 table[1].Double(&table[0]) - table[2].Set(&table[1]).Add(&table[2], &table[0]) - table[4].Set(&table[3]).Add(&table[4], &table[0]) - table[5].Set(&table[3]).Add(&table[5], &table[1]) - table[6].Set(&table[3]).Add(&table[6], &table[2]) + table[2].Add(&table[1], &table[0]) + table[4].Add(&table[3], &table[0]) + table[5].Add(&table[3], &table[1]) + table[6].Add(&table[3], &table[2]) table[7].Double(&table[3]) - table[8].Set(&table[7]).Add(&table[8], &table[0]) - table[9].Set(&table[7]).Add(&table[9], &table[1]) - table[10].Set(&table[7]).Add(&table[10], &table[2]) - table[11].Set(&table[7]).Add(&table[11], &table[3]) - table[12].Set(&table[11]).Add(&table[12], &table[0]) - table[13].Set(&table[11]).Add(&table[13], &table[1]) - table[14].Set(&table[11]).Add(&table[14], &table[2]) - - // bounds on the lattice base vectors guarantee that k1, k2 are len(r)/2 bits long max + table[8].Add(&table[7], &table[0]) + table[9].Add(&table[7], &table[1]) + table[10].Add(&table[7], &table[2]) + table[11].Add(&table[7], &table[3]) + table[12].Add(&table[11], &table[0]) + table[13].Add(&table[11], &table[1]) 
+ table[14].Add(&table[11], &table[2]) + + // bounds on the lattice base vectors guarantee that k1, k2 are len(r)/2 or len(r)/2+1 bits long max + // this is because we use a probabilistic scalar decomposition that replaces a division by a right-shift k1 = k1.SetBigInt(&k[0]).Bits() k2 = k2.SetBigInt(&k[1]).Bits() - // loop starts from len(k1)/2 due to the bounds - // fr.Limbs == Order.limbs - for i := int(math.Ceil(fr.Limbs/2. - 1)); i >= 0; i-- { + // we don't target constant-timeness so we check first if we increase the bounds or not + maxBit := k1.BitLen() + if k2.BitLen() > maxBit { + maxBit = k2.BitLen() + } + hiWordIndex := (maxBit - 1) / 64 + + // loop starts from len(k1)/2 or len(k1)/2+1 due to the bounds + for i := hiWordIndex; i >= 0; i-- { mask := uint64(3) << 62 for j := 0; j < 32; j++ { res.Double(&res).Double(&res) @@ -121,13 +126,13 @@ func (p *PointExtended) phi(p1 *PointExtended) *PointExtended { return p } -// ScalarMultiplication scalar multiplication (GLV) of a point +// scalarMulGLV is the GLV scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int func (p *PointExtended) scalarMulGLV(p1 *PointExtended, scalar *big.Int) *PointExtended { + initOnce.Do(initCurveParams) var table [15]PointExtended - var zero big.Int var res PointExtended var k1, k2 fr.Element @@ -140,11 +145,11 @@ func (p *PointExtended) scalarMulGLV(p1 *PointExtended, scalar *big.Int) *PointE // split the scalar, modifies +-p1, phi(p1) accordingly k := ecc.SplitScalar(scalar, &curveParams.glvBasis) - if k[0].Cmp(&zero) == -1 { + if k[0].Sign() == -1 { k[0].Neg(&k[0]) table[0].Neg(&table[0]) } - if k[1].Cmp(&zero) == -1 { + if k[1].Sign() == -1 { k[1].Neg(&k[1]) table[3].Neg(&table[3]) } @@ -152,26 +157,33 @@ func (p *PointExtended) scalarMulGLV(p1 *PointExtended, scalar *big.Int) *PointE // precompute table (2 bits sliding window) // table[b3b2b1b0-1] = b3b2*phi(p1) + b1b0*p1 if b3b2b1b0 != 0 table[1].Double(&table[0]) - 
table[2].Set(&table[1]).Add(&table[2], &table[0]) - table[4].Set(&table[3]).Add(&table[4], &table[0]) - table[5].Set(&table[3]).Add(&table[5], &table[1]) - table[6].Set(&table[3]).Add(&table[6], &table[2]) + table[2].Add(&table[1], &table[0]) + table[4].Add(&table[3], &table[0]) + table[5].Add(&table[3], &table[1]) + table[6].Add(&table[3], &table[2]) table[7].Double(&table[3]) - table[8].Set(&table[7]).Add(&table[8], &table[0]) - table[9].Set(&table[7]).Add(&table[9], &table[1]) - table[10].Set(&table[7]).Add(&table[10], &table[2]) - table[11].Set(&table[7]).Add(&table[11], &table[3]) - table[12].Set(&table[11]).Add(&table[12], &table[0]) - table[13].Set(&table[11]).Add(&table[13], &table[1]) - table[14].Set(&table[11]).Add(&table[14], &table[2]) - - // bounds on the lattice base vectors guarantee that k1, k2 are len(r)/2 bits long max + table[8].Add(&table[7], &table[0]) + table[9].Add(&table[7], &table[1]) + table[10].Add(&table[7], &table[2]) + table[11].Add(&table[7], &table[3]) + table[12].Add(&table[11], &table[0]) + table[13].Add(&table[11], &table[1]) + table[14].Add(&table[11], &table[2]) + + // bounds on the lattice base vectors guarantee that k1, k2 are len(r)/2 or len(r)/2+1 bits long max + // this is because we use a probabilistic scalar decomposition that replaces a division by a right-shift k1 = k1.SetBigInt(&k[0]).Bits() k2 = k2.SetBigInt(&k[1]).Bits() - // loop starts from len(k1)/2 due to the bounds - // fr.Limbs == Order.limbs - for i := int(math.Ceil(fr.Limbs/2. 
- 1)); i >= 0; i-- { + // we don't target constant-timeness so we check first if we increase the bounds or not + maxBit := k1.BitLen() + if k2.BitLen() > maxBit { + maxBit = k2.BitLen() + } + hiWordIndex := (maxBit - 1) / 64 + + // loop starts from len(k1)/2 or len(k1)/2+1 due to the bounds + for i := hiWordIndex; i >= 0; i-- { mask := uint64(3) << 62 for j := 0; j < 32; j++ { res.Double(&res).Double(&res) diff --git a/ecc/bls12-381/bandersnatch/point.go b/ecc/bls12-381/bandersnatch/point.go index 68b06bc914..ab8691abfc 100644 --- a/ecc/bls12-381/bandersnatch/point.go +++ b/ecc/bls12-381/bandersnatch/point.go @@ -20,6 +20,7 @@ import ( "crypto/subtle" "io" "math/big" + "math/bits" "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" ) @@ -418,6 +419,37 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } +// scalarMulWindowed scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +// using the windowed double-and-add method. +func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { + var _scalar big.Int + _scalar.Set(scalar) + p.Set(p1) + if _scalar.Sign() == -1 { + _scalar.Neg(&_scalar) + p.Neg(p) + } + var resProj PointProj + resProj.setInfinity() + const wordSize = bits.UintSize + sWords := _scalar.Bits() + + for i := len(sWords) - 1; i >= 0; i-- { + ithWord := sWords[i] + for k := 0; k < wordSize; k++ { + resProj.Double(&resProj) + kthBit := (ithWord >> (wordSize - 1 - k)) & 1 + if kthBit == 1 { + resProj.Add(&resProj, p) + } + } + } + + p.Set(&resProj) + return p +} + // ScalarMultiplication scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { @@ -603,6 +635,37 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } +// scalarMulWindowed scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +// using the windowed 
double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { + var _scalar big.Int + _scalar.Set(scalar) + p.Set(p1) + if _scalar.Sign() == -1 { + _scalar.Neg(&_scalar) + p.Neg(p) + } + var resExtended PointExtended + resExtended.setInfinity() + const wordSize = bits.UintSize + sWords := _scalar.Bits() + + for i := len(sWords) - 1; i >= 0; i-- { + ithWord := sWords[i] + for k := 0; k < wordSize; k++ { + resExtended.Double(&resExtended) + kthBit := (ithWord >> (wordSize - 1 - k)) & 1 + if kthBit == 1 { + resExtended.Add(&resExtended, p) + } + } + } + + p.Set(&resExtended) + return p +} + // ScalarMultiplication scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { diff --git a/ecc/bls12-381/bandersnatch/point_test.go b/ecc/bls12-381/bandersnatch/point_test.go index f5df6db26e..d15969e2ab 100644 --- a/ecc/bls12-381/bandersnatch/point_test.go +++ b/ecc/bls12-381/bandersnatch/point_test.go @@ -525,6 +525,22 @@ func TestOps(t *testing.T) { }, genS1, )) + properties.Property("(projective) GLV and double-and-add scalar multiplications give the same results", prop.ForAll( + func(s1 big.Int) bool { + + params := GetEdwardsCurve() + + var baseProj, p1, p2 PointProj + baseProj.FromAffine(¶ms.Base) + + p1.scalarMulWindowed(&baseProj, &s1) + p2.scalarMulGLV(&baseProj, &s1) + + return p2.Equal(&p1) + + }, + genS1, + )) // extended properties.Property("(extended) 0+0=0", prop.ForAll( @@ -609,6 +625,22 @@ func TestOps(t *testing.T) { }, genS1, )) + properties.Property("(extended) GLV and double-and-add scalar multiplications give the same results", prop.ForAll( + func(s1 big.Int) bool { + + params := GetEdwardsCurve() + + var baseExtended, p1, p2 PointExtended + baseExtended.FromAffine(¶ms.Base) + + p1.scalarMulWindowed(&baseExtended, &s1) + p2.scalarMulGLV(&baseExtended, &s1) + + return 
p2.Equal(&p1) + + }, + genS1, + )) // mixed affine+extended properties.Property("(mixed affine+extended) P+(-P)=O", prop.ForAll( diff --git a/ecc/bls12-381/fr/fri/fri_test.go b/ecc/bls12-381/fr/fri/fri_test.go index 5e424c0e85..049723ae5a 100644 --- a/ecc/bls12-381/fr/fri/fri_test.go +++ b/ecc/bls12-381/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bls12-381/fr/mimc/doc.go b/ecc/bls12-381/fr/mimc/doc.go index d527ead9ef..78837e1c80 100644 --- a/ecc/bls12-381/fr/mimc/doc.go +++ b/ecc/bls12-381/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. 
The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be multiple of the field modulus size. And every secuence of byte slice for a +// single field element must be strictly less than the field modulus. package mimc diff --git a/ecc/bls12-381/fr/pedersen/pedersen.go b/ecc/bls12-381/fr/pedersen/pedersen.go index 6e0f51c3e2..32bc01043f 100644 --- a/ecc/bls12-381/fr/pedersen/pedersen.go +++ b/ecc/bls12-381/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must 
have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return 
dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bls12-381/fr/pedersen/pedersen_test.go b/ecc/bls12-381/fr/pedersen/pedersen_test.go index 5356f190ea..070d12c0cc 100644 --- a/ecc/bls12-381/fr/pedersen/pedersen_test.go +++ b/ecc/bls12-381/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bls12-381" "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> 
VerifyingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk))
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -565,14 +617,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func 
(p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -636,6 +687,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git a/ecc/bls12-381/g1_test.go b/ecc/bls12-381/g1_test.go index 05f2f7e25f..7f0fab5a19 100644 --- a/ecc/bls12-381/g1_test.go +++ b/ecc/bls12-381/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS12-381-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS12-381] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS12-381] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-381] Add should call double when adding the same point", prop.ForAll( + func(s 
fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-381] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS12-381] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS12-381] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS12-381] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls12-381/g2.go b/ecc/bls12-381/g2.go index 8e84b29f1c..5d49b49366 100644 --- a/ecc/bls12-381/g2.go +++ b/ecc/bls12-381/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fptower.E2 + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fptower.E2 + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bls12-381/g2_test.go b/ecc/bls12-381/g2_test.go index 1babf7ec8c..d414651c6d 100644 --- a/ecc/bls12-381/g2_test.go +++ b/ecc/bls12-381/g2_test.go @@ -248,7 +248,72 @@ func TestG2AffineOps(t 
*testing.T) { genScalar := GenFr() - properties.Property("[BLS12-381-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS12-381] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS12-381] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-381] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS12-381] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS12-381] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -279,7 +344,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS12-381] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS12-381] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fptower.E2) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -770,6 
+835,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fptower.E2) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls12-381/kzg/kzg.go b/ecc/bls12-381/kzg/kzg.go index 42540cd288..9813880c0a 100644 --- a/ecc/bls12-381/kzg/kzg.go +++ b/ecc/bls12-381/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff bls12381.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bls12381.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bls12381.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bls12381.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bls12381.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bls12381.G1Jac + commitmentJac.FromAffine(commitment) + 
totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bls12381.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bls12381.PairingCheckFixedQ( - []bls12381.G1Affine{totalG1Aff, negH}, + []bls12381.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bls12-381/kzg/kzg_test.go b/ecc/bls12-381/kzg/kzg_test.go index d30ff9aed2..cfc72f3878 100644 --- a/ecc/bls12-381/kzg/kzg_test.go +++ b/ecc/bls12-381/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, 
err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. + var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) 
+ if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bls12381.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bls12-381/kzg/marshal.go b/ecc/bls12-381/kzg/marshal.go index 5583906193..bbfe032976 100644 --- a/ecc/bls12-381/kzg/marshal.go +++ b/ecc/bls12-381/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bls12-381" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bls12381.Encoder)) return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bls12381.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bls12381.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bls12-381/twistededwards/point.go b/ecc/bls12-381/twistededwards/point.go index fc68f84c4e..32fa82e5d3 100644 --- a/ecc/bls12-381/twistededwards/point.go +++ b/ecc/bls12-381/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bls24-315/fr/fri/fri_test.go b/ecc/bls24-315/fr/fri/fri_test.go index 7bdb3538be..5b316bf7ef 100644 --- a/ecc/bls24-315/fr/fri/fri_test.go +++ b/ecc/bls24-315/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bls24-315/fr/mimc/doc.go b/ecc/bls24-315/fr/mimc/doc.go index 
d527ead9ef..78837e1c80 100644 --- a/ecc/bls24-315/fr/mimc/doc.go +++ b/ecc/bls24-315/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be a multiple of the field modulus size. And every sequence of bytes for a +// single field element must be strictly less than the field modulus. 
package mimc diff --git a/ecc/bls24-315/fr/pedersen/pedersen.go b/ecc/bls24-315/fr/pedersen/pedersen.go index 30fcc11d12..8c9a5cc87d 100644 --- a/ecc/bls24-315/fr/pedersen/pedersen.go +++ b/ecc/bls24-315/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 
@@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bls24-315/fr/pedersen/pedersen_test.go 
b/ecc/bls24-315/fr/pedersen/pedersen_test.go index 5dda2da1a3..6cb1d71142 100644 --- a/ecc/bls24-315/fr/pedersen/pedersen_test.go +++ b/ecc/bls24-315/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bls24-315" "github.com/consensys/gnark-crypto/ecc/bls24-315/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bls24-315/g1.go b/ecc/bls24-315/g1.go index 4b94076c34..cf01a70eb2 100644 --- a/ecc/bls24-315/g1.go +++ b/ecc/bls24-315/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { 
return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -566,14 +618,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -637,6 +688,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git 
a/ecc/bls24-315/g1_test.go b/ecc/bls24-315/g1_test.go index b84a440f8c..c2fed674d6 100644 --- a/ecc/bls24-315/g1_test.go +++ b/ecc/bls24-315/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS24-315-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS24-315] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS24-315] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-315] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-315] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS24-315] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS24-315] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + 
properties.Property("[BLS24-315] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls24-315/g2.go b/ecc/bls24-315/g2.go index 9542fd74b5..31cdcd109a 100644 --- a/ecc/bls24-315/g2.go +++ b/ecc/bls24-315/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fptower.E4 + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). 
+ Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point doubling +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fptower.E4 + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bls24-315/g2_test.go b/ecc/bls24-315/g2_test.go index ed6f46d545..9fb276cc1b 100644 --- a/ecc/bls24-315/g2_test.go +++ b/ecc/bls24-315/g2_test.go @@ -248,7 +248,72 @@ func TestG2AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS24-315-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS24-315] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS24-315] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-315] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool 
{ + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-315] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS24-315] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -279,7 +344,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS24-315] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS24-315] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fptower.E4) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -770,6 +835,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fptower.E4) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls24-315/kzg/kzg.go b/ecc/bls24-315/kzg/kzg.go index 43e16d9c01..0cd7cfbbf9 100644 --- a/ecc/bls24-315/kzg/kzg.go +++ b/ecc/bls24-315/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var 
claimedValueG1Aff bls24315.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bls24315.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bls24315.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bls24315.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bls24315.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bls24315.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bls24315.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bls24315.PairingCheckFixedQ( - []bls24315.G1Affine{totalG1Aff, negH}, + []bls24315.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bls24-315/kzg/kzg_test.go b/ecc/bls24-315/kzg/kzg_test.go index 310e120249..805b6aaed5 100644 --- a/ecc/bls24-315/kzg/kzg_test.go +++ b/ecc/bls24-315/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls24-315/fr" "github.com/consensys/gnark-crypto/ecc/bls24-315/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used 
across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. 
+ var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + 
if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bls24315.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bls24-315/kzg/marshal.go b/ecc/bls24-315/kzg/marshal.go index a492a7f4ca..2d5c4ead9f 100644 --- a/ecc/bls24-315/kzg/marshal.go +++ b/ecc/bls24-315/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bls24-315" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bls24315.Encoder)) return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be used to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bls24315.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bls24315.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bls24-315/twistededwards/point.go b/ecc/bls24-315/twistededwards/point.go index 31ed71a52f..32868a7466 100644 --- a/ecc/bls24-315/twistededwards/point.go +++ b/ecc/bls24-315/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bls24-317/fr/fri/fri_test.go b/ecc/bls24-317/fr/fri/fri_test.go index 959d801c65..fa6b568979 100644 --- a/ecc/bls24-317/fr/fri/fri_test.go +++ b/ecc/bls24-317/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bls24-317/fr/mimc/doc.go b/ecc/bls24-317/fr/mimc/doc.go index 
d527ead9ef..78837e1c80 100644 --- a/ecc/bls24-317/fr/mimc/doc.go +++ b/ecc/bls24-317/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be multiple of the field modulus size. And every sequence of byte slice for a +// single field element must be strictly less than the field modulus. 
package mimc diff --git a/ecc/bls24-317/fr/pedersen/pedersen.go b/ecc/bls24-317/fr/pedersen/pedersen.go index 86fcc5f37c..4585c6be32 100644 --- a/ecc/bls24-317/fr/pedersen/pedersen.go +++ b/ecc/bls24-317/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 
@@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bls24-317/fr/pedersen/pedersen_test.go 
b/ecc/bls24-317/fr/pedersen/pedersen_test.go index c0625afa6b..77d48fa718 100644 --- a/ecc/bls24-317/fr/pedersen/pedersen_test.go +++ b/ecc/bls24-317/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bls24-317" "github.com/consensys/gnark-crypto/ecc/bls24-317/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bls24-317/g1.go b/ecc/bls24-317/g1.go index 4f774ea980..68755b91fe 100644 --- a/ecc/bls24-317/g1.go +++ b/ecc/bls24-317/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { 
return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point doubling +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -567,14 +619,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -638,6 +689,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git 
a/ecc/bls24-317/g1_test.go b/ecc/bls24-317/g1_test.go index e624076b1d..c4909aa9c3 100644 --- a/ecc/bls24-317/g1_test.go +++ b/ecc/bls24-317/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS24-317-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS24-317] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS24-317] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-317] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-317] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS24-317] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS24-317] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + 
properties.Property("[BLS24-317] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls24-317/g2.go b/ecc/bls24-317/g2.go index 0dfaae292e..4000f36943 100644 --- a/ecc/bls24-317/g2.go +++ b/ecc/bls24-317/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fptower.E4 + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). 
+ Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fptower.E4 + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bls24-317/g2_test.go b/ecc/bls24-317/g2_test.go index c46c57716b..8d11b24221 100644 --- a/ecc/bls24-317/g2_test.go +++ b/ecc/bls24-317/g2_test.go @@ -248,7 +248,72 @@ func TestG2AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BLS24-317-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BLS24-317] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BLS24-317] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-317] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool 
{ + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BLS24-317] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BLS24-317] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -279,7 +344,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BLS24-317] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BLS24-317] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fptower.E4) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -770,6 +835,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fptower.E4) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bls24-317/kzg/kzg.go b/ecc/bls24-317/kzg/kzg.go index 791e3d1f88..fc4d67762e 100644 --- a/ecc/bls24-317/kzg/kzg.go +++ b/ecc/bls24-317/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var 
claimedValueG1Aff bls24317.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bls24317.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bls24317.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bls24317.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bls24317.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bls24317.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bls24317.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bls24317.PairingCheckFixedQ( - []bls24317.G1Affine{totalG1Aff, negH}, + []bls24317.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bls24-317/kzg/kzg_test.go b/ecc/bls24-317/kzg/kzg_test.go index cec2ab4577..ebf4a58f11 100644 --- a/ecc/bls24-317/kzg/kzg_test.go +++ b/ecc/bls24-317/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls24-317/fr" "github.com/consensys/gnark-crypto/ecc/bls24-317/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used 
across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. 
+ var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + 
if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bls24317.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bls24-317/kzg/marshal.go b/ecc/bls24-317/kzg/marshal.go index bd85227f6d..3bab6e0062 100644 --- a/ecc/bls24-317/kzg/marshal.go +++ b/ecc/bls24-317/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bls24-317" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bls24317.Encoder)) return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bls24317.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bls24317.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bls24-317/twistededwards/point.go b/ecc/bls24-317/twistededwards/point.go index 906c6f741e..3bbcd3dc99 100644 --- a/ecc/bls24-317/twistededwards/point.go +++ b/ecc/bls24-317/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bn254/ecdsa/ecdsa.go b/ecc/bn254/ecdsa/ecdsa.go index dcde60137e..414a7e9402 100644 --- a/ecc/bn254/ecdsa/ecdsa.go +++ b/ecc/bn254/ecdsa/ecdsa.go @@ -42,6 +42,13 @@ const ( sizeSignature = 2 * sizeFr ) +var ( + // ErrNoSqrtR is returned when x^3+ax+b is not a square in the field. This + // is used for public key recovery and allows to detect if the signature is + // valid or not. 
+ ErrNoSqrtR = errors.New("x^3+ax+b is not a square in the field") +) + var order = fr.Modulus() // PublicKey represents an ECDSA public key @@ -109,10 +116,10 @@ func HashToInt(hash []byte) *big.Int { return ret } -// RecoverP recovers the value P (prover commitment) when creating a signature. +// recoverP recovers the value P (prover commitment) when creating a signature. // It uses the recovery information v and part of the decomposed signature r. It // is used internally for recovering the public key. -func RecoverP(v uint, r *big.Int) (*bn254.G1Affine, error) { +func recoverP(v uint, r *big.Int) (*bn254.G1Affine, error) { if r.Cmp(fr.Modulus()) >= 0 { return nil, errors.New("r is larger than modulus") } @@ -139,7 +146,8 @@ func RecoverP(v uint, r *big.Int) (*bn254.G1Affine, error) { y.Mod(y, fp.Modulus()) // y = sqrt(y^2) if y.ModSqrt(y, fp.Modulus()) == nil { - return nil, errors.New("no square root") + // there is no square root, return error constant + return nil, ErrNoSqrtR } // check that y has same oddity as defined by v if y.Bit(0) != yChoice { diff --git a/ecc/bn254/ecdsa/marshal.go b/ecc/bn254/ecdsa/marshal.go index 1bbad0786f..eb618f2d63 100644 --- a/ecc/bn254/ecdsa/marshal.go +++ b/ecc/bn254/ecdsa/marshal.go @@ -73,7 +73,7 @@ func (pk *PublicKey) RecoverFrom(msg []byte, v uint, r, s *big.Int) error { if s.Cmp(big.NewInt(0)) <= 0 { return errors.New("s is negative") } - P, err := RecoverP(v, r) + P, err := recoverP(v, r) if err != nil { return err } diff --git a/ecc/bn254/fr/fri/fri_test.go b/ecc/bn254/fr/fri/fri_test.go index 4d12409c5e..9639f147b1 100644 --- a/ecc/bn254/fr/fri/fri_test.go +++ b/ecc/bn254/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bn254/fr/mimc/doc.go b/ecc/bn254/fr/mimc/doc.go index d527ead9ef..78837e1c80 100644 --- 
a/ecc/bn254/fr/mimc/doc.go +++ b/ecc/bn254/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be multiple of the field modulus size. And every secuence of byte slice for a +// single field element must be strictly less than the field modulus. 
package mimc diff --git a/ecc/bn254/fr/pedersen/pedersen.go b/ecc/bn254/fr/pedersen/pedersen.go index d836eeaecb..e550177cba 100644 --- a/ecc/bn254/fr/pedersen/pedersen.go +++ b/ecc/bn254/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 @@ func 
BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bn254/fr/pedersen/pedersen_test.go b/ecc/bn254/fr/pedersen/pedersen_test.go 
index 53109adfba..cf1adeb23c 100644 --- a/ecc/bn254/fr/pedersen/pedersen_test.go +++ b/ecc/bn254/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bn254/fr/sis/sis.go b/ecc/bn254/fr/sis/sis.go index dce215b141..de608b9de7 100644 --- a/ecc/bn254/fr/sis/sis.go +++ b/ecc/bn254/fr/sis/sis.go @@ -52,7 +52,7 @@ type RSis struct { // domain for the polynomial multiplication Domain *fft.Domain - twiddleCosets []fr.Element // see fft64 and 
precomputeTwiddlesCoset + twiddleCosets []fr.Element // see FFT64 and precomputeTwiddlesCoset // d, the degree of X^{d}+1 Degree int @@ -129,7 +129,7 @@ func NewRSis(seed int64, logTwoDegree, logTwoBound, maxNbElementsToHash int) (*R } if r.LogTwoBound == 8 && r.Degree == 64 { // TODO @gbotrel fixme, that's dirty. - r.twiddleCosets = precomputeTwiddlesCoset(r.Domain.Generator, r.Domain.FrMultiplicativeGen) + r.twiddleCosets = PrecomputeTwiddlesCoset(r.Domain.Generator, r.Domain.FrMultiplicativeGen) } // filling A @@ -199,7 +199,7 @@ func (r *RSis) Sum(b []byte) []byte { k := m[i*r.Degree : (i+1)*r.Degree] if fastPath { // fast path. - fft64(k, r.twiddleCosets) + FFT64(k, r.twiddleCosets) } else { r.Domain.FFT(k, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) } diff --git a/ecc/bn254/fr/sis/sis_fft.go b/ecc/bn254/fr/sis/sis_fft.go index 70b4d32d9d..99eb21888a 100644 --- a/ecc/bn254/fr/sis/sis_fft.go +++ b/ecc/bn254/fr/sis/sis_fft.go @@ -21,10 +21,10 @@ import ( "math/big" ) -// fft64 is generated by gnark-crypto and contains the unrolled code for FFT (DIF) on 64 elements +// FFT64 is generated by gnark-crypto and contains the unrolled code for FFT (DIF) on 64 elements // equivalent code: r.Domain.FFT(k, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) -// twiddlesCoset must be pre-computed from twiddles and coset table, see precomputeTwiddlesCoset -func fft64(a []fr.Element, twiddlesCoset []fr.Element) { +// twiddlesCoset must be pre-computed from twiddles and coset table, see PrecomputeTwiddlesCoset +func FFT64(a []fr.Element, twiddlesCoset []fr.Element) { a[32].Mul(&a[32], &twiddlesCoset[0]) a[33].Mul(&a[33], &twiddlesCoset[0]) @@ -412,9 +412,9 @@ func fft64(a []fr.Element, twiddlesCoset []fr.Element) { fr.Butterfly(&a[62], &a[63]) } -// precomputeTwiddlesCoset precomputes twiddlesCoset from twiddles and coset table +// PrecomputeTwiddlesCoset precomputes twiddlesCoset from twiddles and coset table // it then return all elements in the correct order for the unrolled FFT. 
-func precomputeTwiddlesCoset(generator, shifter fr.Element) []fr.Element { +func PrecomputeTwiddlesCoset(generator, shifter fr.Element) []fr.Element { toReturn := make([]fr.Element, 63) var r, s fr.Element e := new(big.Int) diff --git a/ecc/bn254/fr/sis/sis_test.go b/ecc/bn254/fr/sis/sis_test.go index 480a112779..24a14b46c9 100644 --- a/ecc/bn254/fr/sis/sis_test.go +++ b/ecc/bn254/fr/sis/sis_test.go @@ -426,8 +426,8 @@ func TestUnrolledFFT(t *testing.T) { domain.FFT(k1, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) // unrolled FFT - twiddlesCoset := precomputeTwiddlesCoset(domain.Generator, domain.FrMultiplicativeGen) - fft64(k2, twiddlesCoset) + twiddlesCoset := PrecomputeTwiddlesCoset(domain.Generator, domain.FrMultiplicativeGen) + FFT64(k2, twiddlesCoset) // compare results for i := 0; i < size; i++ { diff --git a/ecc/bn254/g1.go b/ecc/bn254/g1.go index 6945165857..5fdd0d8300 100644 --- a/ecc/bn254/g1.go +++ b/ecc/bn254/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -536,14 +588,13 @@ func (p *G1Jac) mulGLV(a *G1Jac, s *big.Int) *G1Jac { return p } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup 
generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -607,6 +658,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git a/ecc/bn254/g1_test.go b/ecc/bn254/g1_test.go index c973e4cbdd..cfbb1ed07b 100644 --- a/ecc/bn254/g1_test.go +++ b/ecc/bn254/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BN254-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BN254] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BN254] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BN254] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var 
op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BN254] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BN254] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BN254] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BN254] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -734,6 +799,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bn254/g2.go b/ecc/bn254/g2.go index a1656b1798..911b3c7b06 100644 --- a/ecc/bn254/g2.go +++ b/ecc/bn254/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fptower.E2 + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fptower.E2 + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bn254/g2_test.go b/ecc/bn254/g2_test.go index 14646d4bbc..a8de3339ba 100644 --- a/ecc/bn254/g2_test.go +++ b/ecc/bn254/g2_test.go @@ -247,7 +247,72 @@ func TestG2AffineOps(t *testing.T) { 
genScalar := GenFr() - properties.Property("[BN254-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BN254] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BN254] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BN254] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BN254] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BN254] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -278,7 +343,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BN254] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BN254] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fptower.E2) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -769,6 +834,24 @@ func BenchmarkG2JacExtDouble(b 
*testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fptower.E2) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bn254/kzg/kzg.go b/ecc/bn254/kzg/kzg.go index 067894ab5e..2689f970dc 100644 --- a/ecc/bn254/kzg/kzg.go +++ b/ecc/bn254/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff bn254.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bn254.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bn254.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bn254.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bn254.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bn254.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) 
== 1 + var totalG1Aff bn254.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bn254.PairingCheckFixedQ( - []bn254.G1Affine{totalG1Aff, negH}, + []bn254.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bn254/kzg/kzg_test.go b/ecc/bn254/kzg/kzg_test.go index 97adc15d81..b9cd530535 100644 --- a/ecc/bn254/kzg/kzg_test.go +++ b/ecc/bn254/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bn254/fr" "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly 
with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. + var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, 
err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bn254.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bn254/kzg/marshal.go b/ecc/bn254/kzg/marshal.go index c9b019c5a0..b633b9bcbd 100644 --- a/ecc/bn254/kzg/marshal.go +++ b/ecc/bn254/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bn254" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bn254.Encoder)) (i return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bn254.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bn254.G1Affine](r, maxPkPoints...) 
+ return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bn254/pairing.go b/ecc/bn254/pairing.go index a0d39da70c..675eebedf3 100644 --- a/ecc/bn254/pairing.go +++ b/ecc/bn254/pairing.go @@ -439,13 +439,15 @@ func PrecomputeLines(Q G2Affine) (PrecomputedLines [2][len(LoopCounter)]LineEval n := len(LoopCounter) for i := n - 2; i >= 0; i-- { - accQ.doubleStep(&PrecomputedLines[0][i]) - if LoopCounter[i] == 1 { - accQ.addStep(&PrecomputedLines[1][i], &Q) - } else if LoopCounter[i] == -1 { - accQ.addStep(&PrecomputedLines[1][i], &negQ) - } else { - continue + switch LoopCounter[i] { + case 0: + accQ.doubleStep(&PrecomputedLines[0][i]) + case 1: + accQ.doubleAndAddStep(&PrecomputedLines[0][i], &PrecomputedLines[1][i], &Q) + case -1: + accQ.doubleAndAddStep(&PrecomputedLines[0][i], &PrecomputedLines[1][i], &negQ) + default: + return [2][len(LoopCounter)]LineEvaluationAff{} } } @@ -673,3 +675,49 @@ func (p *G2Affine) addStep(evaluations *LineEvaluationAff, a *G2Affine) { p.X.Set(&xr) p.Y.Set(&yr) } + +func (p *G2Affine) doubleAndAddStep(evaluations1, evaluations2 *LineEvaluationAff, a *G2Affine) { + var n, d, l1, x3, l2, x4, y4 fptower.E2 + + // compute λ1 = (y2-y1)/(x2-x1) + n.Sub(&p.Y, &a.Y) + d.Sub(&p.X, &a.X) + l1.Div(&n, &d) + + // compute x3 =λ1²-x1-x2 + x3.Square(&l1) + x3.Sub(&x3, &p.X) + x3.Sub(&x3, &a.X) + + // omit y3 computation + + // compute line1 + evaluations1.R0.Set(&l1) + evaluations1.R1.Mul(&l1, &p.X) + evaluations1.R1.Sub(&evaluations1.R1, &p.Y) + + // compute λ2 = -λ1-2y1/(x3-x1) + n.Double(&p.Y) + d.Sub(&x3, &p.X) + l2.Div(&n, &d) + l2.Add(&l2, &l1) + l2.Neg(&l2) + + // compute x4 = λ2²-x1-x3 + x4.Square(&l2) + x4.Sub(&x4, &p.X) + x4.Sub(&x4, &x3) + + // compute y4 = λ2(x1 - x4)-y1 + y4.Sub(&p.X, &x4) + y4.Mul(&l2, &y4) + y4.Sub(&y4, &p.Y) + + // compute line2 + evaluations2.R0.Set(&l2) + evaluations2.R1.Mul(&l2, &p.X) + 
evaluations2.R1.Sub(&evaluations2.R1, &p.Y) + + p.X.Set(&x4) + p.Y.Set(&y4) +} diff --git a/ecc/bn254/twistededwards/point.go b/ecc/bn254/twistededwards/point.go index 084c0db9b1..67e1ca47f5 100644 --- a/ecc/bn254/twistededwards/point.go +++ b/ecc/bn254/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. +func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. 
+func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bw6-633/fr/fri/fri_test.go b/ecc/bw6-633/fr/fri/fri_test.go index d0ff389db4..7fcf8dd707 100644 --- a/ecc/bw6-633/fr/fri/fri_test.go +++ b/ecc/bw6-633/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bw6-633/fr/mimc/doc.go b/ecc/bw6-633/fr/mimc/doc.go index d527ead9ef..78837e1c80 100644 --- a/ecc/bw6-633/fr/mimc/doc.go +++ b/ecc/bw6-633/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. 
+// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be multiple of the field modulus size. And every secuence of byte slice for a +// single field element must be strictly less than the field modulus. package mimc diff --git a/ecc/bw6-633/fr/pedersen/pedersen.go b/ecc/bw6-633/fr/pedersen/pedersen.go index 60964a610e..cf2e80f9f7 100644 --- a/ecc/bw6-633/fr/pedersen/pedersen.go +++ b/ecc/bw6-633/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err 
error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := 
enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bw6-633/fr/pedersen/pedersen_test.go b/ecc/bw6-633/fr/pedersen/pedersen_test.go index c644bebd3e..ed27f87cdd 100644 --- a/ecc/bw6-633/fr/pedersen/pedersen_test.go +++ b/ecc/bw6-633/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bw6-633" "github.com/consensys/gnark-crypto/ecc/bw6-633/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - 
t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bw6-633/g1.go b/ecc/bw6-633/g1.go index f402c9bf9a..285735708c 100644 --- a/ecc/bw6-633/g1.go +++ b/ecc/bw6-633/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -590,14 +642,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func 
(p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -661,6 +712,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git a/ecc/bw6-633/g1_test.go b/ecc/bw6-633/g1_test.go index 51576b39f0..37ff9ba14c 100644 --- a/ecc/bw6-633/g1_test.go +++ b/ecc/bw6-633/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BW6-633-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BW6-633] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BW6-633] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-633] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var 
op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-633] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BW6-633] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BW6-633] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BW6-633] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bw6-633/g2.go b/ecc/bw6-633/g2.go index 435890609f..029544b4b0 100644 --- a/ecc/bw6-633/g2.go +++ b/ecc/bw6-633/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). + Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bw6-633/g2_test.go b/ecc/bw6-633/g2_test.go index d47bac2f56..271bc6ab02 100644 --- a/ecc/bw6-633/g2_test.go +++ b/ecc/bw6-633/g2_test.go @@ -234,7 +234,72 @@ func TestG2AffineOps(t 
*testing.T) { genScalar := GenFr() - properties.Property("[BW6-633-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BW6-633] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BW6-633] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-633] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-633] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BW6-633] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -265,7 +330,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BW6-633] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BW6-633] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -751,6 +816,24 @@ func 
BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fp.Element) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bw6-633/internal/fptower/e3.go b/ecc/bw6-633/internal/fptower/e3.go index 03f65c0046..bb7421c777 100644 --- a/ecc/bw6-633/internal/fptower/e3.go +++ b/ecc/bw6-633/internal/fptower/e3.go @@ -128,14 +128,6 @@ func (z *E3) String() string { return (z.A0.String() + "+(" + z.A1.String() + ")*u+(" + z.A2.String() + ")*u**2") } -// Conjugate conjugates an element in E3 -func (z *E3) Conjugate(x *E3) *E3 { - z.A0.Set(&x.A0) - z.A1.Neg(&x.A1) - z.A2.Set(&x.A2) - return z -} - // MulByElement multiplies an element in E3 by an element in fp func (z *E3) MulByElement(x *E3, y *fp.Element) *E3 { var yCopy fp.Element diff --git a/ecc/bw6-633/internal/fptower/e3_test.go b/ecc/bw6-633/internal/fptower/e3_test.go index 0526292de7..9db8a02517 100644 --- a/ecc/bw6-633/internal/fptower/e3_test.go +++ b/ecc/bw6-633/internal/fptower/e3_test.go @@ -111,16 +111,6 @@ func TestE3ReceiverIsOperand(t *testing.T) { genA, )) - properties.Property("[BW6-633] Having the receiver as operand (Conjugate) should output the same result", prop.ForAll( - func(a *E3) bool { - var b E3 - b.Conjugate(a) - a.Conjugate(a) - return a.Equal(&b) - }, - genA, - )) - properties.Property("[BW6-633] Having the receiver as operand (mul by element) should output the same result", prop.ForAll( func(a *E3, b fp.Element) bool { var c E3 @@ -246,20 +236,6 @@ func TestE3Ops(t *testing.T) { genA, )) - properties.Property("[BW6-633] a + pi(a), a-pi(a) should be real", prop.ForAll( - func(a *E3) bool { - var b, c, d E3 - var e, f fp.Element - b.Conjugate(a) - 
c.Add(a, &b) - d.Sub(a, &b) - e.Double(&a.A0) - f.Double(&a.A1) - return c.A1.IsZero() && d.A0.IsZero() && e.Equal(&c.A0) && f.Equal(&d.A1) - }, - genA, - )) - properties.TestingRun(t, gopter.ConsoleReporter(false)) } @@ -333,12 +309,3 @@ func BenchmarkE3MulNonRes(b *testing.B) { a.MulByNonResidue(&a) } } - -func BenchmarkE3Conjugate(b *testing.B) { - var a E3 - _, _ = a.SetRandom() - b.ResetTimer() - for i := 0; i < b.N; i++ { - a.Conjugate(&a) - } -} diff --git a/ecc/bw6-633/kzg/kzg.go b/ecc/bw6-633/kzg/kzg.go index cf59dff0fa..041f45b3d1 100644 --- a/ecc/bw6-633/kzg/kzg.go +++ b/ecc/bw6-633/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff bw6633.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bw6633.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bw6633.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bw6633.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bw6633.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bw6633.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // 
e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bw6633.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bw6633.PairingCheckFixedQ( - []bw6633.G1Affine{totalG1Aff, negH}, + []bw6633.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bw6-633/kzg/kzg_test.go b/ecc/bw6-633/kzg/kzg_test.go index 477d4e578e..62fad1e7ac 100644 --- a/ecc/bw6-633/kzg/kzg_test.go +++ b/ecc/bw6-633/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bw6-633/fr" "github.com/consensys/gnark-crypto/ecc/bw6-633/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) 
+ assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. + var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + 
b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bw6633.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bw6-633/kzg/marshal.go b/ecc/bw6-633/kzg/marshal.go index 2b6a44034f..8cbce1f179 100644 --- a/ecc/bw6-633/kzg/marshal.go +++ b/ecc/bw6-633/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bw6-633" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bw6633.Encoder)) ( return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bw6633.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bw6633.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bw6-633/twistededwards/point.go b/ecc/bw6-633/twistededwards/point.go index 6dd41d0f6d..e47b6f14bd 100644 --- a/ecc/bw6-633/twistededwards/point.go +++ b/ecc/bw6-633/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bw6-756/fr/fri/fri_test.go b/ecc/bw6-756/fr/fri/fri_test.go index a44cdc9864..c302566bcf 100644 --- a/ecc/bw6-756/fr/fri/fri_test.go +++ b/ecc/bw6-756/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bw6-756/fr/mimc/doc.go b/ecc/bw6-756/fr/mimc/doc.go index 
d527ead9ef..78837e1c80 100644 --- a/ecc/bw6-756/fr/mimc/doc.go +++ b/ecc/bw6-756/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be a multiple of the field modulus size. And every sequence of byte slice for a +// single field element must be strictly less than the field modulus. 
package mimc diff --git a/ecc/bw6-756/fr/pedersen/pedersen.go b/ecc/bw6-756/fr/pedersen/pedersen.go index e33f50d43f..bbe7f9e1d9 100644 --- a/ecc/bw6-756/fr/pedersen/pedersen.go +++ b/ecc/bw6-756/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 @@ func 
BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bw6-756/fr/pedersen/pedersen_test.go 
b/ecc/bw6-756/fr/pedersen/pedersen_test.go index c91e012b14..41f505d8b1 100644 --- a/ecc/bw6-756/fr/pedersen/pedersen_test.go +++ b/ecc/bw6-756/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bw6-756" "github.com/consensys/gnark-crypto/ecc/bw6-756/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bw6-756/g1.go b/ecc/bw6-756/g1.go index d0600ac1a4..23d5dc0e5e 100644 --- a/ecc/bw6-756/g1.go +++ b/ecc/bw6-756/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } -// 
ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -589,14 +641,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { return p } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -660,6 +711,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended 
diff --git a/ecc/bw6-756/g1_test.go b/ecc/bw6-756/g1_test.go index 81c0eab486..a93bed7b9a 100644 --- a/ecc/bw6-756/g1_test.go +++ b/ecc/bw6-756/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BW6-756-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BW6-756] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BW6-756] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-756] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-756] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BW6-756] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BW6-756] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + 
properties.Property("[BW6-756] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bw6-756/g2.go b/ecc/bw6-756/g2.go index 1a7cc2e0ed..480ab0703f 100644 --- a/ecc/bw6-756/g2.go +++ b/ecc/bw6-756/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). 
+ Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bw6-756/g2_test.go b/ecc/bw6-756/g2_test.go index 6c79825c53..723c1d143d 100644 --- a/ecc/bw6-756/g2_test.go +++ b/ecc/bw6-756/g2_test.go @@ -234,7 +234,72 @@ func TestG2AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BW6-756-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BW6-756] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BW6-756] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-756] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, 
op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-756] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BW6-756] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -265,7 +330,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BW6-756] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BW6-756] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -751,6 +816,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fp.Element) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bw6-756/internal/fptower/e3.go b/ecc/bw6-756/internal/fptower/e3.go index 15fc2d4bbe..bc645379e9 100644 --- a/ecc/bw6-756/internal/fptower/e3.go +++ b/ecc/bw6-756/internal/fptower/e3.go @@ -124,13 +124,6 @@ func (z *E3) String() string { return (z.A0.String() + "+(" + z.A1.String() + ")*u+(" + z.A2.String() + ")*u**2") } -// Conjugate conjugates an element in E3 -func (z *E3) Conjugate(x *E3) *E3 { - *z = *x - z.A1.Neg(&z.A1) - return z 
-} - // MulByElement multiplies an element in E3 by an element in fp func (z *E3) MulByElement(x *E3, y *fp.Element) *E3 { _y := *y diff --git a/ecc/bw6-756/internal/fptower/e3_test.go b/ecc/bw6-756/internal/fptower/e3_test.go index 2e2eac7fa5..09cc04640f 100644 --- a/ecc/bw6-756/internal/fptower/e3_test.go +++ b/ecc/bw6-756/internal/fptower/e3_test.go @@ -112,16 +112,6 @@ func TestE3ReceiverIsOperand(t *testing.T) { genA, )) - properties.Property("[BW756] Having the receiver as operand (Conjugate) should output the same result", prop.ForAll( - func(a *E3) bool { - var b E3 - b.Conjugate(a) - a.Conjugate(a) - return a.Equal(&b) - }, - genA, - )) - properties.Property("[BW756] Having the receiver as operand (mul by element) should output the same result", prop.ForAll( func(a *E3, b fp.Element) bool { var c E3 @@ -248,20 +238,6 @@ func TestE3Ops(t *testing.T) { genA, )) - properties.Property("[BW756] a + pi(a), a-pi(a) should be real", prop.ForAll( - func(a *E3) bool { - var b, c, d E3 - var e, f fp.Element - b.Conjugate(a) - c.Add(a, &b) - d.Sub(a, &b) - e.Double(&a.A0) - f.Double(&a.A1) - return c.A1.IsZero() && d.A0.IsZero() && e.Equal(&c.A0) && f.Equal(&d.A1) - }, - genA, - )) - properties.TestingRun(t, gopter.ConsoleReporter(false)) } @@ -335,12 +311,3 @@ func BenchmarkE3MulNonRes(b *testing.B) { a.MulByNonResidue(&a) } } - -func BenchmarkE3Conjugate(b *testing.B) { - var a E3 - _, _ = a.SetRandom() - b.ResetTimer() - for i := 0; i < b.N; i++ { - a.Conjugate(&a) - } -} diff --git a/ecc/bw6-756/kzg/kzg.go b/ecc/bw6-756/kzg/kzg.go index 42539e691a..9002613d45 100644 --- a/ecc/bw6-756/kzg/kzg.go +++ b/ecc/bw6-756/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff bw6756.G1Jac - var claimedValueBigInt big.Int - 
proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bw6756.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bw6756.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bw6756.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bw6756.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bw6756.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bw6756.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bw6756.PairingCheckFixedQ( - []bw6756.G1Affine{totalG1Aff, negH}, + []bw6756.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bw6-756/kzg/kzg_test.go b/ecc/bw6-756/kzg/kzg_test.go index 894753a5d2..1d40d4c950 100644 --- a/ecc/bw6-756/kzg/kzg_test.go +++ b/ecc/bw6-756/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bw6-756/fr" "github.com/consensys/gnark-crypto/ecc/bw6-756/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // 
create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. 
+ var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + 
if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bw6756.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bw6-756/kzg/marshal.go b/ecc/bw6-756/kzg/marshal.go index da7706c50c..b74a955e20 100644 --- a/ecc/bw6-756/kzg/marshal.go +++ b/ecc/bw6-756/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bw6-756" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bw6756.Encoder)) ( return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bw6756.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bw6756.G1Affine](r, maxPkPoints...) + return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bw6-756/twistededwards/point.go b/ecc/bw6-756/twistededwards/point.go index 799ff121ca..edb6d19f0e 100644 --- a/ecc/bw6-756/twistededwards/point.go +++ b/ecc/bw6-756/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/bw6-761/fr/fri/fri_test.go b/ecc/bw6-761/fr/fri/fri_test.go index 838e21d2cb..14f5798e2f 100644 --- a/ecc/bw6-761/fr/fri/fri_test.go +++ b/ecc/bw6-761/fr/fri/fri_test.go @@ -95,7 +95,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/ecc/bw6-761/fr/mimc/doc.go b/ecc/bw6-761/fr/mimc/doc.go index 
d527ead9ef..78837e1c80 100644 --- a/ecc/bw6-761/fr/mimc/doc.go +++ b/ecc/bw6-761/fr/mimc/doc.go @@ -15,4 +15,46 @@ // Code generated by consensys/gnark-crypto DO NOT EDIT // Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. +// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be a multiple of the field modulus size. And every sequence of byte slice for a +// single field element must be strictly less than the field modulus. 
package mimc diff --git a/ecc/bw6-761/fr/pedersen/pedersen.go b/ecc/bw6-761/fr/pedersen/pedersen.go index b77c0724b0..a9a6e56498 100644 --- a/ecc/bw6-761/fr/pedersen/pedersen.go +++ b/ecc/bw6-761/fr/pedersen/pedersen.go @@ -30,8 +30,8 @@ import ( // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { @@ -74,17 +74,17 @@ func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err err pk = make([]ProvingKey, len(bases)) for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) } - pk[i].basis = bases[i] + pk[i].Basis = bases[i] } return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -95,13 +95,13 @@ func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, e NbTasks: 1, // TODO Experiment } - _, err = pok.MultiExp(pk.basisExpSigma, values, config) + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { + if len(values) != len(pk.Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -111,7 +111,7 @@ func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, er config := ecc.MultiExpConfig{ NbTasks: 1, } - _, err = commitment.MultiExp(pk.basis, values, config) + _, err = commitment.MultiExp(pk.Basis, values, config) return } @@ -131,7 +131,7 @@ func 
BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt offset := 0 for i := range pk { - if len(values[i]) != len(pk[i].basis) { + if len(values[i]) != len(pk[i].Basis) { err = fmt.Errorf("must have as many values as basis elements") return } @@ -147,14 +147,14 @@ func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byt scaledValues := make([]fr.Element, offset) basis := make([]curve.G1Affine, offset) - copy(basis, pk[0].basisExpSigma) + copy(basis, pk[0].BasisExpSigma) copy(scaledValues, values[0]) offset = len(values[0]) rI := r for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { scaledValues[offset].Mul(&values[i][j], &rI) offset++ } @@ -245,11 +245,11 @@ func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { + if err := enc.Encode(pk.Basis); err != nil { return enc.BytesWritten(), err } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -265,14 +265,14 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) } diff --git a/ecc/bw6-761/fr/pedersen/pedersen_test.go 
b/ecc/bw6-761/fr/pedersen/pedersen_test.go index 32f53f99d1..4b9a78d813 100644 --- a/ecc/bw6-761/fr/pedersen/pedersen_test.go +++ b/ecc/bw6-761/fr/pedersen/pedersen_test.go @@ -20,7 +20,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bw6-761" "github.com/consensys/gnark-crypto/ecc/bw6-761/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -166,8 +166,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -178,8 +178,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/ecc/bw6-761/g1.go b/ecc/bw6-761/g1.go index 7367bdcc55..c9950d4bc3 100644 --- a/ecc/bw6-761/g1.go +++ b/ecc/bw6-761/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } -// 
ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Double(a *G1Affine) *G1Affine { - var p1 G1Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G1Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G1Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -601,14 +653,13 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -672,6 +723,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian extended diff --git 
a/ecc/bw6-761/g1_test.go b/ecc/bw6-761/g1_test.go index d15ae3a9f4..a174050394 100644 --- a/ecc/bw6-761/g1_test.go +++ b/ecc/bw6-761/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BW6-761-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BW6-761] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BW6-761] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-761] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-761] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BW6-761] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BW6-761] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BW6-761] 
[Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -781,6 +846,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bw6-761/g2.go b/ecc/bw6-761/g2.go index a5eea4a762..b3fe75201e 100644 --- a/ecc/bw6-761/g2.go +++ b/ecc/bw6-761/g2.go @@ -79,34 +79,65 @@ func (p *G2Affine) ScalarMultiplicationBase(s *big.Int) *G2Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G2Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). 
+ Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Double(a *G2Affine) *G2Affine { - var p1 G2Jac - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q G2Jac + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { - var p1, p2 G2Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.SubAssign(&p2) - p.FromJacobian(&p1) + var bneg G2Affine + bneg.Neg(b) + p.Add(a, &bneg) return p } @@ -276,6 +307,35 @@ func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { return p } +// DoubleMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl +func (p *G2Jac) DoubleMixed(a *G2Affine) *G2Jac { + var XX, YY, YYYY, S, M, T fp.Element + XX.Square(&a.X) + YY.Square(&a.Y) + YYYY.Square(&YY) + S.Add(&a.X, &YY). + Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + T.Square(&M). + Sub(&T, &S). + Sub(&T, &S) + p.X.Set(&T) + p.Y.Sub(&S, &T). + Mul(&p.Y, &M) + YYYY.Double(&YYYY). + Double(&YYYY). 
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { @@ -300,7 +360,7 @@ func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -371,6 +431,11 @@ func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G2Jac) ScalarMultiplicationBase(s *big.Int) *G2Jac { + return p.mulGLV(&g2Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G2Jac) String() string { _p := G2Affine{} diff --git a/ecc/bw6-761/g2_test.go b/ecc/bw6-761/g2_test.go index 563aec5081..89f69def82 100644 --- a/ecc/bw6-761/g2_test.go +++ b/ecc/bw6-761/g2_test.go @@ -234,7 +234,72 @@ func TestG2AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[BW6-761-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[BW6-761] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[BW6-761] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-761] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, 
op2 G2Affine + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[BW6-761] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g2GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G2Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[BW6-761] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g2GenAff var gj G2Jac @@ -265,7 +330,7 @@ func TestG2AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[BW6-761] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[BW6-761] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG2Jac(&g2Gen, a) fop2 := fuzzG2Jac(&g2Gen, b) @@ -751,6 +816,24 @@ func BenchmarkG2JacExtDouble(b *testing.B) { } } +func BenchmarkG2AffineAdd(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g2GenAff) + } +} + +func BenchmarkG2AffineDouble(b *testing.B) { + var a G2Affine + a.Double(&g2GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG2Jac(p *G2Jac, f fp.Element) G2Jac { var res G2Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/bw6-761/internal/fptower/e3.go b/ecc/bw6-761/internal/fptower/e3.go index 29e7fd7ff7..b16151212d 100644 --- a/ecc/bw6-761/internal/fptower/e3.go +++ b/ecc/bw6-761/internal/fptower/e3.go @@ -124,13 +124,6 @@ func (z *E3) String() string { return (z.A0.String() + "+(" + z.A1.String() + ")*u+(" + z.A2.String() + ")*u**2") } -// Conjugate conjugates an element in E3 -func (z *E3) Conjugate(x *E3) *E3 { - *z = *x - z.A1.Neg(&z.A1) - return z 
-} - // MulByElement multiplies an element in E3 by an element in fp func (z *E3) MulByElement(x *E3, y *fp.Element) *E3 { _y := *y diff --git a/ecc/bw6-761/internal/fptower/e3_test.go b/ecc/bw6-761/internal/fptower/e3_test.go index 1fd50d2f5e..99de77cf15 100644 --- a/ecc/bw6-761/internal/fptower/e3_test.go +++ b/ecc/bw6-761/internal/fptower/e3_test.go @@ -111,16 +111,6 @@ func TestE3ReceiverIsOperand(t *testing.T) { genA, )) - properties.Property("[BW761] Having the receiver as operand (Conjugate) should output the same result", prop.ForAll( - func(a *E3) bool { - var b E3 - b.Conjugate(a) - a.Conjugate(a) - return a.Equal(&b) - }, - genA, - )) - properties.Property("[BW761] Having the receiver as operand (mul by element) should output the same result", prop.ForAll( func(a *E3, b fp.Element) bool { var c E3 @@ -246,20 +236,6 @@ func TestE3Ops(t *testing.T) { genA, )) - properties.Property("[BW761] a + pi(a), a-pi(a) should be real", prop.ForAll( - func(a *E3) bool { - var b, c, d E3 - var e, f fp.Element - b.Conjugate(a) - c.Add(a, &b) - d.Sub(a, &b) - e.Double(&a.A0) - f.Double(&a.A1) - return c.A1.IsZero() && d.A0.IsZero() && e.Equal(&c.A0) && f.Equal(&d.A1) - }, - genA, - )) - properties.TestingRun(t, gopter.ConsoleReporter(false)) } @@ -333,12 +309,3 @@ func BenchmarkE3MulNonRes(b *testing.B) { a.MulByNonResidue(&a) } } - -func BenchmarkE3Conjugate(b *testing.B) { - var a E3 - _, _ = a.SetRandom() - b.ResetTimer() - for i := 0; i < b.N; i++ { - a.Conjugate(&a) - } -} diff --git a/ecc/bw6-761/kzg/kzg.go b/ecc/bw6-761/kzg/kzg.go index 8984c2ef72..1ee2f5009b 100644 --- a/ecc/bw6-761/kzg/kzg.go +++ b/ecc/bw6-761/kzg/kzg.go @@ -217,35 +217,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff bw6761.G1Jac - var claimedValueBigInt big.Int - 
proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bw6761.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bw6761.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 bw6761.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) - var totalG1Aff bw6761.G1Affine - totalG1Aff.FromJacobian(&totalG1) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac bw6761.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 + var totalG1Aff bw6761.G1Affine + totalG1Aff.FromJacobian(&totalG1) check, err := bw6761.PairingCheckFixedQ( - []bw6761.G1Affine{totalG1Aff, negH}, + []bw6761.G1Affine{totalG1Aff, proof.H}, vk.Lines[:], ) + if err != nil { return err } diff --git a/ecc/bw6-761/kzg/kzg_test.go b/ecc/bw6-761/kzg/kzg_test.go index 3679834018..fdec0e1885 100644 --- a/ecc/bw6-761/kzg/kzg_test.go +++ b/ecc/bw6-761/kzg/kzg_test.go @@ -17,6 +17,7 @@ package kzg import ( + "bytes" "crypto/sha256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bw6-761/fr" "github.com/consensys/gnark-crypto/ecc/bw6-761/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -157,10 +158,11 @@ func TestSerializationSRS(t *testing.T) { // 
create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -431,7 +433,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1<<9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. 
+ var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1<<8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -622,6 +659,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1<<24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + 
if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []bw6761.G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) diff --git a/ecc/bw6-761/kzg/marshal.go b/ecc/bw6-761/kzg/marshal.go index 5f6c431866..ca9e1452bb 100644 --- a/ecc/bw6-761/kzg/marshal.go +++ b/ecc/bw6-761/kzg/marshal.go @@ -19,6 +19,8 @@ package kzg import ( "github.com/consensys/gnark-crypto/ecc/bw6-761" "io" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -76,6 +78,51 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*bw6761.Encoder)) ( return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0 { + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, bw6761.RawEncoding()); err != nil { + return err + } + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]bw6761.G1Affine](r, maxPkPoints...) 
+ return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/ecc/bw6-761/pairing.go b/ecc/bw6-761/pairing.go index cf6c18896b..ce141e076b 100644 --- a/ecc/bw6-761/pairing.go +++ b/ecc/bw6-761/pairing.go @@ -411,31 +411,32 @@ func PrecomputeLines(Q G2Affine) (PrecomputedLines [2][len(LoopCounter) - 1]Line // precomputations var accQ, imQ, imQneg, negQ G2Affine imQ.Y.Neg(&Q.Y) + imQ.X.Mul(&Q.X, &thirdRootOneG1) negQ.X.Set(&Q.X) negQ.Y.Set(&imQ.Y) - imQ.X.Mul(&Q.X, &thirdRootOneG1) accQ.Set(&imQ) - imQneg.Neg(&imQ) + imQneg.X.Set(&imQ.X) + imQneg.Y.Set(&Q.Y) - for i := len(LoopCounter) - 2; i >= 0; i-- { - accQ.doubleStep(&PrecomputedLines[0][i]) + for i := len(LoopCounter) - 2; i > 0; i-- { switch LoopCounter1[i]*3 + LoopCounter[i] { // cases -4, -2, 2, 4 do not occur, given the static LoopCounters case -3: - accQ.addStep(&PrecomputedLines[1][i], &imQneg) + accQ.doubleAndAddStep(&PrecomputedLines[0][i], &PrecomputedLines[1][i], &imQneg) case -1: - accQ.addStep(&PrecomputedLines[1][i], &negQ) + accQ.doubleAndAddStep(&PrecomputedLines[0][i], &PrecomputedLines[1][i], &negQ) case 0: - continue + accQ.doubleStep(&PrecomputedLines[0][i]) case 1: - accQ.addStep(&PrecomputedLines[1][i], &Q) + accQ.doubleAndAddStep(&PrecomputedLines[0][i], &PrecomputedLines[1][i], &Q) case 3: - accQ.addStep(&PrecomputedLines[1][i], &imQ) + accQ.doubleAndAddStep(&PrecomputedLines[0][i], &PrecomputedLines[1][i], &imQ) default: return [2][len(LoopCounter) - 1]LineEvaluationAff{} } } + accQ.tangentCompute(&PrecomputedLines[0][0]) return PrecomputedLines } @@ -578,3 +579,64 @@ func (p *G2Affine) addStep(evaluations *LineEvaluationAff, a *G2Affine) { p.X.Set(&xr) p.Y.Set(&yr) } + +func (p *G2Affine) doubleAndAddStep(evaluations1, evaluations2 *LineEvaluationAff, a *G2Affine) { + var n, d, l1, x3, l2, x4, y4 fp.Element + + // compute λ1 = (y2-y1)/(x2-x1) + n.Sub(&p.Y, &a.Y) + d.Sub(&p.X, &a.X) + l1.Div(&n, 
&d) + + // compute x3 =λ1²-x1-x2 + x3.Square(&l1) + x3.Sub(&x3, &p.X) + x3.Sub(&x3, &a.X) + + // omit y3 computation + + // compute line1 + evaluations1.R0.Set(&l1) + evaluations1.R1.Mul(&l1, &p.X) + evaluations1.R1.Sub(&evaluations1.R1, &p.Y) + + // compute λ2 = -λ1-2y1/(x3-x1) + n.Double(&p.Y) + d.Sub(&x3, &p.X) + l2.Div(&n, &d) + l2.Add(&l2, &l1) + l2.Neg(&l2) + + // compute x4 = λ2²-x1-x3 + x4.Square(&l2) + x4.Sub(&x4, &p.X) + x4.Sub(&x4, &x3) + + // compute y4 = λ2(x1 - x4)-y1 + y4.Sub(&p.X, &x4) + y4.Mul(&l2, &y4) + y4.Sub(&y4, &p.Y) + + // compute line2 + evaluations2.R0.Set(&l2) + evaluations2.R1.Mul(&l2, &p.X) + evaluations2.R1.Sub(&evaluations2.R1, &p.Y) + + p.X.Set(&x4) + p.Y.Set(&y4) +} + +func (p *G2Affine) tangentCompute(evaluations *LineEvaluationAff) { + + var n, d, λ fp.Element + // λ = 3x²/2y + n.Square(&p.X) + λ.Double(&n). + Add(&λ, &n) + d.Double(&p.Y) + λ.Div(&λ, &d) + + evaluations.R0.Set(&λ) + evaluations.R1.Mul(&λ, &p.X). + Sub(&evaluations.R1, &p.Y) +} diff --git a/ecc/bw6-761/twistededwards/point.go b/ecc/bw6-761/twistededwards/point.go index 9a76c49047..b4a566b192 100644 --- a/ecc/bw6-761/twistededwards/point.go +++ b/ecc/bw6-761/twistededwards/point.go @@ -419,9 +419,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { +// using the windowed double-and-add method. 
+func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -449,6 +450,12 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP return p } +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + return p.scalarMulWindowed(p1, scalar) +} + // ------- Extended coordinates // Set sets p to p1 and return it @@ -628,9 +635,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { +// using the windowed double-and-add method. +func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -657,3 +665,9 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p } + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + return p.scalarMulWindowed(p1, scalar) +} diff --git a/ecc/secp256k1/ecdsa/ecdsa.go b/ecc/secp256k1/ecdsa/ecdsa.go index 6c633fc991..689bc5003e 100644 --- a/ecc/secp256k1/ecdsa/ecdsa.go +++ b/ecc/secp256k1/ecdsa/ecdsa.go @@ -42,6 +42,13 @@ const ( sizeSignature = 2 * sizeFr ) +var ( + // ErrNoSqrtR is returned when x^3+ax+b is not a square in the field. This + // is used for public key recovery and allows to detect if the signature is + // valid or not. 
+ ErrNoSqrtR = errors.New("x^3+ax+b is not a square in the field") +) + var order = fr.Modulus() // PublicKey represents an ECDSA public key @@ -109,10 +116,10 @@ func HashToInt(hash []byte) *big.Int { return ret } -// RecoverP recovers the value P (prover commitment) when creating a signature. +// recoverP recovers the value P (prover commitment) when creating a signature. // It uses the recovery information v and part of the decomposed signature r. It // is used internally for recovering the public key. -func RecoverP(v uint, r *big.Int) (*secp256k1.G1Affine, error) { +func recoverP(v uint, r *big.Int) (*secp256k1.G1Affine, error) { if r.Cmp(fr.Modulus()) >= 0 { return nil, errors.New("r is larger than modulus") } @@ -139,7 +146,8 @@ func RecoverP(v uint, r *big.Int) (*secp256k1.G1Affine, error) { y.Mod(y, fp.Modulus()) // y = sqrt(y^2) if y.ModSqrt(y, fp.Modulus()) == nil { - return nil, errors.New("no square root") + // there is no square root, return error constant + return nil, ErrNoSqrtR } // check that y has same oddity as defined by v if y.Bit(0) != yChoice { diff --git a/ecc/secp256k1/ecdsa/marshal.go b/ecc/secp256k1/ecdsa/marshal.go index 886c3dace9..bc2be013c1 100644 --- a/ecc/secp256k1/ecdsa/marshal.go +++ b/ecc/secp256k1/ecdsa/marshal.go @@ -73,7 +73,7 @@ func (pk *PublicKey) RecoverFrom(msg []byte, v uint, r, s *big.Int) error { if s.Cmp(big.NewInt(0)) <= 0 { return errors.New("s is negative") } - P, err := RecoverP(v, r) + P, err := recoverP(v, r) if err != nil { return err } diff --git a/ecc/secp256k1/g1.go b/ecc/secp256k1/g1.go index ea9312c570..85e0aca7de 100644 --- a/ecc/secp256k1/g1.go +++ b/ecc/secp256k1/g1.go @@ -65,19 +65,6 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac { - p.FromAffine(a) - 
p.mulGLV(p, s) - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { - return p.mulGLV(&g1Gen, s) -} - // ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { var _p G1Jac @@ -87,34 +74,65 @@ func (p *G1Affine) ScalarMultiplicationBase(s *big.Int) *G1Affine { } // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { - var p1, p2 G1Jac - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q G1Jac + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V fp.Element + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian
 func (p *G1Affine) Double(a *G1Affine) *G1Affine {
-	var p1 G1Jac
-	p1.FromAffine(a)
-	p1.Double(&p1)
-	p.FromJacobian(&p1)
+	var q G1Jac
+	q.FromAffine(a)
+	q.DoubleMixed(a)
+	p.FromJacobian(&q)
 	return p
 }
 
 // Sub subs two point in affine coordinates.
-// This should rarely be used as it is very inefficient compared to Jacobian
 func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine {
-	var p1, p2 G1Jac
-	p1.FromAffine(a)
-	p2.FromAffine(b)
-	p1.SubAssign(&p2)
-	p.FromJacobian(&p1)
+	var bneg G1Affine
+	bneg.Neg(b)
+	p.Add(a, &bneg)
 	return p
 }
 
@@ -284,6 +302,35 @@ func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac {
 	return p
 }
 
+// DoubleMixed point doubling with affine input
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl
+func (p *G1Jac) DoubleMixed(a *G1Affine) *G1Jac {
+	var XX, YY, YYYY, S, M, T fp.Element
+	XX.Square(&a.X)
+	YY.Square(&a.Y)
+	YYYY.Square(&YY)
+	S.Add(&a.X, &YY).
+		Square(&S).
+		Sub(&S, &XX).
+		Sub(&S, &YYYY).
+		Double(&S)
+	M.Double(&XX).
+		Add(&M, &XX) // -> + a, but a=0 here
+	T.Square(&M).
+		Sub(&T, &S).
+		Sub(&T, &S)
+	p.X.Set(&T)
+	p.Y.Sub(&S, &T).
+		Mul(&p.Y, &M)
+	YYYY.Double(&YYYY).
+		Double(&YYYY).
+ Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + p.Z.Double(&a.Y) + + return p +} + // AddMixed point addition // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { @@ -308,7 +355,7 @@ func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { // if p == a, we double instead if U2.Equal(&p.X) && S2.Equal(&p.Y) { - return p.DoubleAssign() + return p.DoubleMixed(a) } H.Sub(&U2, &p.X) @@ -379,6 +426,11 @@ func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } +// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator +func (p *G1Jac) ScalarMultiplicationBase(s *big.Int) *G1Jac { + return p.mulGLV(&g1Gen, s) +} + // String returns canonical representation of the point in affine coordinates func (p *G1Jac) String() string { _p := G1Affine{} @@ -535,14 +587,13 @@ func (p *G1Jac) mulGLV(a *G1Jac, s *big.Int) *G1Jac { return p } -// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique -// where g is the prime subgroup generator -func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { +// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique +func (p *G1Jac) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *G1Jac { var res, p1, p2 G1Jac res.Set(&g1Infinity) - p1.Set(&g1Gen) - p2.FromAffine(a) + p1.FromAffine(a1) + p2.FromAffine(a2) var table [15]G1Jac @@ -606,6 +657,12 @@ func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1J } +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *G1Jac) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *G1Jac { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} + // ------------------------------------------------------------------------------------------------- // Jacobian 
extended diff --git a/ecc/secp256k1/g1_test.go b/ecc/secp256k1/g1_test.go index aa2fbb832d..f55a78e7cd 100644 --- a/ecc/secp256k1/g1_test.go +++ b/ecc/secp256k1/g1_test.go @@ -247,7 +247,72 @@ func TestG1AffineOps(t *testing.T) { genScalar := GenFr() - properties.Property("[SECP256K1-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[SECP256K1] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[SECP256K1] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[SECP256K1] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 G1Affine + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[SECP256K1] [2]G = double(G) + G - G", prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := g1GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 G1Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[SECP256K1] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := g1GenAff var gj G1Jac @@ -278,7 +343,7 @@ func TestG1AffineOps(t *testing.T) { GenFr(), )) - properties.Property("[SECP256K1] [Jacobian] Add should call double when having adding the same point", 
prop.ForAll( + properties.Property("[SECP256K1] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b fp.Element) bool { fop1 := fuzzG1Jac(&g1Gen, a) fop2 := fuzzG1Jac(&g1Gen, b) @@ -734,6 +799,24 @@ func BenchmarkG1JacExtDouble(b *testing.B) { } } +func BenchmarkG1AffineAdd(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &g1GenAff) + } +} + +func BenchmarkG1AffineDouble(b *testing.B) { + var a G1Affine + a.Double(&g1GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} + func fuzzG1Jac(p *G1Jac, f fp.Element) G1Jac { var res G1Jac res.X.Mul(&p.X, &f).Mul(&res.X, &f) diff --git a/ecc/stark-curve/ecdsa/ecdsa.go b/ecc/stark-curve/ecdsa/ecdsa.go index 5d94cd7915..de997d8aef 100644 --- a/ecc/stark-curve/ecdsa/ecdsa.go +++ b/ecc/stark-curve/ecdsa/ecdsa.go @@ -42,6 +42,13 @@ const ( sizeSignature = 2 * sizeFr ) +var ( + // ErrNoSqrtR is returned when x^3+ax+b is not a square in the field. This + // is used for public key recovery and allows to detect if the signature is + // valid or not. + ErrNoSqrtR = errors.New("x^3+ax+b is not a square in the field") +) + var order = fr.Modulus() // PublicKey represents an ECDSA public key @@ -109,10 +116,10 @@ func HashToInt(hash []byte) *big.Int { return ret } -// RecoverP recovers the value P (prover commitment) when creating a signature. +// recoverP recovers the value P (prover commitment) when creating a signature. // It uses the recovery information v and part of the decomposed signature r. It // is used internally for recovering the public key. 
-func RecoverP(v uint, r *big.Int) (*starkcurve.G1Affine, error) { +func recoverP(v uint, r *big.Int) (*starkcurve.G1Affine, error) { if r.Cmp(fr.Modulus()) >= 0 { return nil, errors.New("r is larger than modulus") } @@ -139,7 +146,8 @@ func RecoverP(v uint, r *big.Int) (*starkcurve.G1Affine, error) { y.Mod(y, fp.Modulus()) // y = sqrt(y^2) if y.ModSqrt(y, fp.Modulus()) == nil { - return nil, errors.New("no square root") + // there is no square root, return error constant + return nil, ErrNoSqrtR } // check that y has same oddity as defined by v if y.Bit(0) != yChoice { diff --git a/ecc/stark-curve/ecdsa/marshal.go b/ecc/stark-curve/ecdsa/marshal.go index 47888049f1..8860f427e0 100644 --- a/ecc/stark-curve/ecdsa/marshal.go +++ b/ecc/stark-curve/ecdsa/marshal.go @@ -73,7 +73,7 @@ func (pk *PublicKey) RecoverFrom(msg []byte, v uint, r, s *big.Int) error { if s.Cmp(big.NewInt(0)) <= 0 { return errors.New("s is negative") } - P, err := RecoverP(v, r) + P, err := recoverP(v, r) if err != nil { return err } diff --git a/hash/doc.go b/hash/doc.go new file mode 100644 index 0000000000..faefa0669e --- /dev/null +++ b/hash/doc.go @@ -0,0 +1,48 @@ +// Package hash provides MiMC hash function defined over implemented curves +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. 
+//
+// There are several ways to mitigate this attack:
+//   - use a random key for each hash
+//   - use a domain separation tag for different use cases:
+//     h = MiMC(k || tag || m)
+//   - use the secret input as last input:
+//     h = MiMC(m || k)
+//
+// In general, inside a circuit the length-extension attack is not a concern as
+// due to the circuit definition the attacker can not append messages to
+// existing hash. But the user has to consider the cases when using a secret key
+// and MiMC in different contexts.
+//
+// # Hash input format
+//
+// The MiMC hash function is defined over a field. The input to the hash
+// function is a byte slice. The byte slice is interpreted as a sequence of
+// field elements. Due to this interpretation, the input byte slice length must
+// be a multiple of the field modulus size. And every sequence of bytes encoding
+// a single field element must be strictly less than the field modulus.
+//
+// See open issues:
+//   - https://github.com/Consensys/gnark-crypto/issues/504
+//   - https://github.com/Consensys/gnark-crypto/issues/485
+package hash
diff --git a/hash/hashes.go b/hash/hashes.go
index 37bce29593..8f668c7a4e 100644
--- a/hash/hashes.go
+++ b/hash/hashes.go
@@ -12,9 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package hash provides MiMC hash function defined over curves implemented in gnark-crypto/ecc.
-//
-// Originally developed and used in a ZKP context.
 package hash
 
 import (
@@ -31,17 +28,27 @@ import (
 	bw761 "github.com/consensys/gnark-crypto/ecc/bw6-761/fr/mimc"
 )
 
+// Hash defines a unique identifier for a hash function.
 type Hash uint
 
 const (
+	// MIMC_BN254 is the MiMC hash function for the BN254 curve.
 	MIMC_BN254 Hash = iota
+	// MIMC_BLS12_381 is the MiMC hash function for the BLS12-381 curve.
 	MIMC_BLS12_381
+	// MIMC_BLS12_377 is the MiMC hash function for the BLS12-377 curve.
MIMC_BLS12_377 + // MIMC_BLS12_378 is the MiMC hash function for the BLS12-378 curve. MIMC_BLS12_378 + // MIMC_BW6_761 is the MiMC hash function for the BW6-761 curve. MIMC_BW6_761 + // MIMC_BLS24_315 is the MiMC hash function for the BLS24-315 curve. MIMC_BLS24_315 + // MIMC_BLS24_317 is the MiMC hash function for the BLS24-317 curve. MIMC_BLS24_317 + // MIMC_BW6_633 is the MiMC hash function for the BW6-633 curve. MIMC_BW6_633 + // MIMC_BW6_756 is the MiMC hash function for the BW6-756 curve. MIMC_BW6_756 ) @@ -58,7 +65,7 @@ var digestSize = []uint8{ MIMC_BW6_756: 96, } -// New creates the corresponding mimc hash function. +// New initializes the hash function. func (m Hash) New() hash.Hash { switch m { case MIMC_BN254: @@ -84,7 +91,7 @@ func (m Hash) New() hash.Hash { } } -// String returns the mimc ID to string format. +// String returns the unique identifier of the hash function. func (m Hash) String() string { switch m { case MIMC_BN254: diff --git a/internal/generator/crypto/hash/mimc/template/doc.go.tmpl b/internal/generator/crypto/hash/mimc/template/doc.go.tmpl index 6e02e37877..38175ffd94 100644 --- a/internal/generator/crypto/hash/mimc/template/doc.go.tmpl +++ b/internal/generator/crypto/hash/mimc/template/doc.go.tmpl @@ -1,2 +1,44 @@ // Package {{.Package}} provides MiMC hash function using Miyaguchi–Preneel construction. +// +// # Length extension attack +// +// The MiMC hash function is vulnerable to a length extension attack. For +// example when we have a hash +// +// h = MiMC(k || m) +// +// and we want to hash a new message +// +// m' = m || m2, +// +// we can compute +// +// h' = MiMC(k || m || m2) +// +// without knowing k by computing +// +// h' = MiMC(h || m2). +// +// This is because the MiMC hash function is a simple iterated cipher, and the +// hash value is the state of the cipher after encrypting the message. 
+// +// There are several ways to mitigate this attack: +// - use a random key for each hash +// - use a domain separation tag for different use cases: +// h = MiMC(k || tag || m) +// - use the secret input as last input: +// h = MiMC(m || k) +// +// In general, inside a circuit the length-extension attack is not a concern as +// due to the circuit definition the attacker can not append messages to +// existing hash. But the user has to consider the cases when using a secret key +// and MiMC in different contexts. +// +// # Hash input format +// +// The MiMC hash function is defined over a field. The input to the hash +// function is a byte slice. The byte slice is interpreted as a sequence of +// field elements. Due to this interpretation, the input byte slice length must +// be multiple of the field modulus size. And every secuence of byte slice for a +// single field element must be strictly less than the field modulus. package {{.Package}} \ No newline at end of file diff --git a/internal/generator/ecc/template/point.go.tmpl b/internal/generator/ecc/template/point.go.tmpl index 602b995fe4..29d9082bea 100644 --- a/internal/generator/ecc/template/point.go.tmpl +++ b/internal/generator/ecc/template/point.go.tmpl @@ -75,67 +75,81 @@ func (p *{{ $TAffine }}) ScalarMultiplication(a *{{ $TAffine }}, s *big.Int) *{{ return p } -{{- if eq .PointName "g1"}} -// ScalarMultiplicationAffine computes and returns p = a ⋅ s -// Takes an affine point and returns a Jacobian point (useful for KZG) -func (p *{{ $TJacobian }}) ScalarMultiplicationAffine(a *{{ $TAffine }}, s *big.Int) *{{ $TJacobian }} { - p.FromAffine(a) - {{- if .GLV}} - p.mulGLV(p, s) - {{- else }} - p.mulWindowed(p, s) - {{- end }} - return p -} - -// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator -func (p *{{ $TJacobian }}) ScalarMultiplicationBase(s *big.Int) *{{ $TJacobian }} { - return p.mulGLV(&g1Gen, s) -} -{{- end}} - // ScalarMultiplicationBase computes 
and returns p = g ⋅ s where g is the prime subgroup generator func (p *{{ $TAffine }}) ScalarMultiplicationBase(s *big.Int) *{{ $TAffine }} { var _p {{ $TJacobian }} - _p.mulGLV(&{{ toLower .PointName}}Gen, s) + {{- if .GLV}} + _p.mulGLV(&{{ toLower .PointName}}Gen, s) + {{- else }} + _p.mulWindowed(&{{ toLower .PointName}}Gen, s) + {{- end }} p.FromJacobian(&_p) return p } - // Add adds two point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian +// Jacobian addition with Z1=Z2=1 +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl func (p *{{ $TAffine }}) Add(a, b *{{ $TAffine }}) *{{ $TAffine }} { - var p1, p2 {{ $TJacobian }} - p1.FromAffine(a) - p2.FromAffine(b) - p1.AddAssign(&p2) - p.FromJacobian(&p1) - return p + var q {{ $TJacobian }} + // a is infinity, return b + if a.IsInfinity() { + p.Set(b) + return p + } + // b is infinity, return a + if b.IsInfinity() { + p.Set(a) + return p + } + if a.X.Equal(&b.X) { + // if b == a, we double instead + if a.Y.Equal(&b.Y) { + q.DoubleMixed(a) + return p.FromJacobian(&q) + } else { + // if b == -a, we return 0 + return p.setInfinity() + } + } + var H, HH, I, J, r, V {{.CoordType}} + H.Sub(&b.X, &a.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&b.Y, &a.Y) + r.Double(&r) + V.Mul(&a.X, &I) + q.X.Square(&r). + Sub(&q.X, &J). + Sub(&q.X, &V). + Sub(&q.X, &V) + q.Y.Sub(&V, &q.X). + Mul(&q.Y, &r) + J.Mul(&a.Y, &J).Double(&J) + q.Y.Sub(&q.Y, &J) + q.Z.Double(&H) + + return p.FromJacobian(&q) } // Double doubles a point in affine coordinates. -// This should rarely be used as it is very inefficient compared to Jacobian func (p *{{ $TAffine }}) Double(a *{{ $TAffine }}) *{{ $TAffine }} { - var p1 {{ $TJacobian }} - p1.FromAffine(a) - p1.Double(&p1) - p.FromJacobian(&p1) + var q {{ $TJacobian }} + q.FromAffine(a) + q.DoubleMixed(a) + p.FromJacobian(&q) return p } // Sub subs two point in affine coordinates. 
-// This should rarely be used as it is very inefficient compared to Jacobian
 func (p *{{ $TAffine }}) Sub(a, b *{{ $TAffine }}) *{{ $TAffine }} {
-	var p1, p2 {{ $TJacobian }}
-	p1.FromAffine(a)
-	p2.FromAffine(b)
-	p1.SubAssign(&p2)
-	p.FromJacobian(&p1)
+	var bneg {{ $TAffine }}
+	bneg.Neg(b)
+	p.Add(a, &bneg)
 	return p
 }
 
-
 // Equal tests if two points (in Affine coordinates) are equal
 func (p *{{ $TAffine }}) Equal(a *{{ $TAffine }}) bool {
 	return p.X.Equal(&a.X) && p.Y.Equal(&a.Y)
@@ -310,6 +324,35 @@ func (p *{{ $TJacobian }}) AddAssign(a *{{ $TJacobian }}) *{{ $TJacobian }} {
 	return p
 }
 
+// DoubleMixed point doubling with affine input
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl
+func (p *{{ $TJacobian }}) DoubleMixed(a *{{ $TAffine }}) *{{ $TJacobian }} {
+	var XX, YY, YYYY, S, M, T {{.CoordType}}
+	XX.Square(&a.X)
+	YY.Square(&a.Y)
+	YYYY.Square(&YY)
+	S.Add(&a.X, &YY).
+		Square(&S).
+		Sub(&S, &XX).
+		Sub(&S, &YYYY).
+		Double(&S)
+	M.Double(&XX).
+		Add(&M, &XX) // -> + a, but a=0 here
+	T.Square(&M).
+		Sub(&T, &S).
+		Sub(&T, &S)
+	p.X.Set(&T)
+	p.Y.Sub(&S, &T).
+		Mul(&p.Y, &M)
+	YYYY.Double(&YYYY).
+		Double(&YYYY).
+		Double(&YYYY)
+	p.Y.Sub(&p.Y, &YYYY)
+	p.Z.Double(&a.Y)
+
+	return p
+}
+
 // AddMixed point addition
 // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
 func (p *{{ $TJacobian }}) AddMixed(a *{{ $TAffine }}) *{{ $TJacobian }} {
@@ -334,7 +377,7 @@ func (p *{{ $TJacobian }}) AddMixed(a *{{ $TAffine }}) *{{ $TJacobian }} {
 
 	// if p == a, we double instead
 	if U2.Equal(&p.X) && S2.Equal(&p.Y) {
-		return p.DoubleAssign()
+		return p.DoubleMixed(a)
 	}
 
 	H.Sub(&U2, &p.X)
@@ -410,6 +453,15 @@ func (p *{{ $TJacobian }}) ScalarMultiplication(a *{{ $TJacobian }}, s *big.Int)
 	{{- end }}
 }
 
+// ScalarMultiplicationBase computes and returns p = g ⋅ s where g is the prime subgroup generator
+func (p *{{ $TJacobian }}) ScalarMultiplicationBase(s *big.Int) *{{ $TJacobian }} {
+	{{- if .GLV}}
+	return p.mulGLV(&{{ toLower .PointName}}Gen, s)
+	{{- else }}
+	return p.mulWindowed(&{{ toLower .PointName}}Gen, s)
+	{{- end }}
+}
+
 // String returns canonical representation of the point in affine coordinates
 func (p *{{ $TJacobian }}) String() string {
 	_p := {{ $TAffine }}{}
@@ -1186,14 +1238,13 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 {{ end }}
 
 {{ if eq .PointName "g1" }}
-// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique
-// where g is the prime subgroup generator
-func (p *{{$TJacobian}}) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *{{$TJacobian}} {
+// JointScalarMultiplication computes [s1]a1+[s2]a2 using Straus-Shamir technique
+func (p *{{$TJacobian}}) JointScalarMultiplication(a1, a2 *G1Affine, s1, s2 *big.Int) *{{$TJacobian}} {
 	var res, p1, p2 {{$TJacobian}}
 
 	res.Set(&{{ toLower .PointName }}Infinity)
-	p1.Set(&g1Gen)
-	p2.FromAffine(a)
+	p1.FromAffine(a1)
+	p2.FromAffine(a2)
 
 	var table [15]{{$TJacobian}}
 
@@ -1256,6 +1307,12 @@ func (p *{{$TJacobian}}) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.
return p } + +// JointScalarMultiplicationBase computes [s1]g+[s2]a using Straus-Shamir technique +// where g is the prime subgroup generator +func (p *{{$TJacobian}}) JointScalarMultiplicationBase(a *G1Affine, s1, s2 *big.Int) *{{$TJacobian}} { + return p.JointScalarMultiplication(&g1GenAff, a, s1, s2) +} {{ end }} diff --git a/internal/generator/ecc/template/tests/point.go.tmpl b/internal/generator/ecc/template/tests/point.go.tmpl index 07626858dc..432fec1dc4 100644 --- a/internal/generator/ecc/template/tests/point.go.tmpl +++ b/internal/generator/ecc/template/tests/point.go.tmpl @@ -282,7 +282,72 @@ func Test{{ $TAffine }}Ops(t *testing.T) { genScalar := GenFr() - properties.Property("[{{ toUpper .Name }}-381] [-s]G = -[s]G", prop.ForAll( + properties.Property("[{{ toUpper .Name }}] Add(P,-P) should return the point at infinity", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 {{ toUpper .PointName }}Affine + var sInt big.Int + g := {{ toLower .PointName }}GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.Neg(&op1) + + op1.Add(&op1, &op2) + return op1.IsInfinity() + + }, + GenFr(), + )) + + properties.Property("[{{ toUpper .Name }}] Add(P,0) and Add(0,P) should return P", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 {{ toUpper .PointName }}Affine + var sInt big.Int + g := {{ toLower .PointName }}GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + op2.setInfinity() + + op1.Add(&op1, &op2) + op2.Add(&op2, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[{{ toUpper .Name }}] Add should call double when adding the same point", prop.ForAll( + func(s fr.Element) bool { + var op1, op2 {{ toUpper .PointName }}Affine + var sInt big.Int + g := {{ toLower .PointName }}GenAff + s.BigInt(&sInt) + op1.ScalarMultiplication(&g, &sInt) + + op2.Double(&op1) + op1.Add(&op1, &op1) + return op1.Equal(&op2) + + }, + GenFr(), + )) + + properties.Property("[{{ toUpper .Name }}] [2]G = double(G) + G - G", 
prop.ForAll( + func(s fr.Element) bool { + var sInt big.Int + g := {{ toLower .PointName }}GenAff + s.BigInt(&sInt) + g.ScalarMultiplication(&g, &sInt) + var op1, op2 {{ toUpper .PointName }}Affine + op1.ScalarMultiplication(&g, big.NewInt(2)) + op2.Double(&g) + op2.Add(&op2, &g) + op2.Sub(&op2, &g) + return op1.Equal(&op2) + }, + GenFr(), + )) + + properties.Property("[{{ toUpper .Name }}] [-s]G = -[s]G", prop.ForAll( func(s fr.Element) bool { g := {{ toLower .PointName }}GenAff var gj {{ toUpper .PointName }}Jac @@ -313,7 +378,7 @@ func Test{{ $TAffine }}Ops(t *testing.T) { GenFr(), )) - properties.Property("[{{ toUpper .Name }}] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + properties.Property("[{{ toUpper .Name }}] [Jacobian] Add should call double when adding the same point", prop.ForAll( func(a, b {{ .CoordType}}) bool { fop1 := fuzz{{ $TJacobian }}(&{{ toLower .PointName }}Gen, a) fop2 := fuzz{{ $TJacobian }}(&{{ toLower .PointName }}Gen, b) @@ -646,7 +711,7 @@ func Benchmark{{ $TJacobian }}Equal(b *testing.B) { b.Run("not equal", func(b *testing.B) { var aPlus1 {{ $TJacobian }} - aPlus1.AddAssign(&{{.PointName}}Gen) + aPlus1.AddAssign(&{{.PointName}}Gen) // Check the setup. 
if a.Equal(&aPlus1) { @@ -854,7 +919,23 @@ func Benchmark{{ toUpper .PointName}}JacExtDouble(b *testing.B) { } } +func Benchmark{{ toUpper .PointName}}AffineAdd(b *testing.B) { + var a {{ $TAffine }} + a.Double(&{{.PointName}}GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &{{.PointName}}GenAff) + } +} +func Benchmark{{ toUpper .PointName}}AffineDouble(b *testing.B) { + var a {{ $TAffine }} + a.Double(&{{.PointName}}GenAff) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Double(&a) + } +} func fuzz{{ $TJacobian }}(p *{{ $TJacobian }}, f {{ .CoordType}}) {{ $TJacobian }} { var res {{ $TJacobian }} diff --git a/internal/generator/ecdsa/template/ecdsa.go.tmpl b/internal/generator/ecdsa/template/ecdsa.go.tmpl index 1dda1ed34b..aa0e3868cc 100644 --- a/internal/generator/ecdsa/template/ecdsa.go.tmpl +++ b/internal/generator/ecdsa/template/ecdsa.go.tmpl @@ -30,6 +30,15 @@ const ( sizeSignature = 2 * sizeFr ) +{{- if or (eq .Name "secp256k1") (eq .Name "bn254") (eq .Name "stark-curve") }} +var ( + // ErrNoSqrtR is returned when x^3+ax+b is not a square in the field. This + // is used for public key recovery and allows to detect if the signature is + // valid or not. + ErrNoSqrtR = errors.New("x^3+ax+b is not a square in the field") +) +{{- end }} + var order = fr.Modulus() // PublicKey represents an ECDSA public key @@ -103,10 +112,10 @@ func HashToInt(hash []byte) *big.Int { } {{- if or (eq .Name "secp256k1") (eq .Name "bn254") (eq .Name "stark-curve") }} -// RecoverP recovers the value P (prover commitment) when creating a signature. +// recoverP recovers the value P (prover commitment) when creating a signature. // It uses the recovery information v and part of the decomposed signature r. It // is used internally for recovering the public key. 
-func RecoverP(v uint, r *big.Int) (*{{ .CurvePackage }}.G1Affine, error) { +func recoverP(v uint, r *big.Int) (*{{ .CurvePackage }}.G1Affine, error) { if r.Cmp(fr.Modulus()) >= 0 { return nil, errors.New("r is larger than modulus") } @@ -135,7 +144,8 @@ func RecoverP(v uint, r *big.Int) (*{{ .CurvePackage }}.G1Affine, error) { y.Mod(y, fp.Modulus()) // y = sqrt(y^2) if y.ModSqrt(y, fp.Modulus()) == nil { - return nil, errors.New("no square root") + // there is no square root, return error constant + return nil, ErrNoSqrtR } // check that y has same oddity as defined by v if y.Bit(0) != yChoice { diff --git a/internal/generator/ecdsa/template/marshal.go.tmpl b/internal/generator/ecdsa/template/marshal.go.tmpl index 5ca1f0cbca..12729f3b11 100644 --- a/internal/generator/ecdsa/template/marshal.go.tmpl +++ b/internal/generator/ecdsa/template/marshal.go.tmpl @@ -62,7 +62,7 @@ func (pk *PublicKey) RecoverFrom(msg []byte, v uint, r, s *big.Int) error { if s.Cmp(big.NewInt(0)) <= 0 { return errors.New("s is negative") } - P, err := RecoverP(v, r) + P, err := recoverP(v, r) if err != nil { return err } diff --git a/internal/generator/edwards/template/point.go.tmpl b/internal/generator/edwards/template/point.go.tmpl index a842e9c332..558633ca8c 100644 --- a/internal/generator/edwards/template/point.go.tmpl +++ b/internal/generator/edwards/template/point.go.tmpl @@ -2,9 +2,7 @@ import ( "crypto/subtle" "io" "math/big" - {{- if not .HasEndomorphism}} "math/bits" - {{- end }} "github.com/consensys/gnark-crypto/ecc/{{.Name}}/fr" ) @@ -403,12 +401,10 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in projective coordinates with a scalar in big.Int -func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { - {{- if .HasEndomorphism}} - return p.scalarMulGLV(p1, scalar) - {{- else }} +// using the windowed 
double-and-add method. +func (p *PointProj) scalarMulWindowed(p1 *PointProj, scalar *big.Int) *PointProj { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -434,6 +430,15 @@ func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointP p.Set(&resProj) return p +} + +// ScalarMultiplication scalar multiplication of a point +// p1 in projective coordinates with a scalar in big.Int +func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj { + {{- if .HasEndomorphism}} + return p.scalarMulGLV(p1, scalar) + {{- else }} + return p.scalarMulWindowed(p1, scalar) {{- end}} } @@ -624,12 +629,10 @@ func (p *PointExtended) setInfinity() *PointExtended { return p } -// ScalarMultiplication scalar multiplication of a point +// scalarMulWindowed scalar multiplication of a point // p1 in extended coordinates with a scalar in big.Int -func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { - {{- if .HasEndomorphism}} - return p.scalarMulGLV(p1, scalar) - {{- else }} +// using the windowed double-and-add method. 
+func (p *PointExtended) scalarMulWindowed(p1 *PointExtended, scalar *big.Int) *PointExtended { var _scalar big.Int _scalar.Set(scalar) p.Set(p1) @@ -655,5 +658,14 @@ func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) p.Set(&resExtended) return p - {{- end }} +} + +// ScalarMultiplication scalar multiplication of a point +// p1 in extended coordinates with a scalar in big.Int +func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended { + {{- if .HasEndomorphism}} + return p.scalarMulGLV(p1, scalar) + {{- else }} + return p.scalarMulWindowed(p1, scalar) + {{- end}} } diff --git a/internal/generator/edwards/template/tests/point.go.tmpl b/internal/generator/edwards/template/tests/point.go.tmpl index fe758266fe..b21b1aa252 100644 --- a/internal/generator/edwards/template/tests/point.go.tmpl +++ b/internal/generator/edwards/template/tests/point.go.tmpl @@ -508,6 +508,26 @@ func TestOps(t *testing.T) { genS1, )) + {{- if .HasEndomorphism}} + properties.Property("(projective) GLV and double-and-add scalar multiplications give the same results", prop.ForAll( + func(s1 big.Int) bool { + + params := GetEdwardsCurve() + + var baseProj, p1, p2 PointProj + baseProj.FromAffine(¶ms.Base) + + p1.scalarMulWindowed(&baseProj, &s1) + p2.scalarMulGLV(&baseProj, &s1) + + return p2.Equal(&p1) + + }, + genS1, + + )) + {{- end}} + // extended properties.Property("(extended) 0+0=0", prop.ForAll( func(s1 big.Int) bool { @@ -591,6 +611,26 @@ func TestOps(t *testing.T) { }, genS1, )) + {{- if .HasEndomorphism}} + properties.Property("(extended) GLV and double-and-add scalar multiplications give the same results", prop.ForAll( + func(s1 big.Int) bool { + + params := GetEdwardsCurve() + + var baseExtended, p1, p2 PointExtended + baseExtended.FromAffine(¶ms.Base) + + p1.scalarMulWindowed(&baseExtended, &s1) + p2.scalarMulGLV(&baseExtended, &s1) + + return p2.Equal(&p1) + + }, + genS1, + + )) + {{- end}} + // mixed 
affine+extended properties.Property("(mixed affine+extended) P+(-P)=O", prop.ForAll( @@ -879,7 +919,7 @@ func BenchmarkAdd(b *testing.B) { params := GetEdwardsCurve() var s big.Int s.SetString("52435875175126190479447705081859658376581184513", 10) - + b.Run("Affine", func(b *testing.B) { var point PointAffine point.ScalarMultiplication(¶ms.Base, &s) @@ -950,4 +990,4 @@ func BenchmarkIsOnCurve(b *testing.B) { _ = point.IsOnCurve() } }) -} \ No newline at end of file +} diff --git a/internal/generator/fri/template/fri.test.go.tmpl b/internal/generator/fri/template/fri.test.go.tmpl index df331544d2..0c6b357194 100644 --- a/internal/generator/fri/template/fri.test.go.tmpl +++ b/internal/generator/fri/template/fri.test.go.tmpl @@ -77,7 +77,7 @@ func TestFRI(t *testing.T) { return err != nil }, - gen.Int32Range(0, int32(rho*size)), + gen.Int32Range(1, int32(rho*size)), )) properties.Property("verifying correct opening should succeed", prop.ForAll( diff --git a/internal/generator/kzg/template/kzg.go.tmpl b/internal/generator/kzg/template/kzg.go.tmpl index 632c47c2b7..4a1eb34ec9 100644 --- a/internal/generator/kzg/template/kzg.go.tmpl +++ b/internal/generator/kzg/template/kzg.go.tmpl @@ -204,36 +204,27 @@ func Open(p []fr.Element, point fr.Element, pk ProvingKey) (OpeningProof, error) // Verify verifies a KZG opening proof at a single point func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, vk VerifyingKey) error { - // [f(a)]G₁ - var claimedValueG1Aff {{ .CurvePackage }}.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.BigInt(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&vk.G1, &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac {{ .CurvePackage }}.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH {{ .CurvePackage }}.G1Affine - negH.Neg(&proof.H) - - // [f(α) - f(a) + a*H(α)]G₁ + // [f(a)]G₁ + [-a]([H(α)]G₁) = [f(a) - a*H(α)]G₁ var totalG1 
{{ .CurvePackage }}.G1Jac - var pointBigInt big.Int - point.BigInt(&pointBigInt) - totalG1.ScalarMultiplicationAffine(&proof.H, &pointBigInt) - totalG1.AddAssign(&fminusfaG1Jac) + var pointNeg fr.Element + var cmInt, pointInt big.Int + proof.ClaimedValue.BigInt(&cmInt) + pointNeg.Neg(&point).BigInt(&pointInt) + totalG1.JointScalarMultiplication(&vk.G1, &proof.H, &cmInt, &pointInt) + + // [f(a) - a*H(α)]G₁ + [-f(α)]G₁ = [f(a) - f(α) - a*H(α)]G₁ + var commitmentJac {{ .CurvePackage }}.G1Jac + commitmentJac.FromAffine(commitment) + totalG1.SubAssign(&commitmentJac) + + // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 var totalG1Aff {{ .CurvePackage }}.G1Affine totalG1Aff.FromJacobian(&totalG1) + check, err := {{ .CurvePackage }}.PairingCheckFixedQ( + []{{ .CurvePackage }}.G1Affine{totalG1Aff, proof.H}, + vk.Lines[:], + ) - - // e([f(α)-f(a)+aH(α)]G₁], G₂).e([-H(α)]G₁, [α]G₂) == 1 - check, err := {{ .CurvePackage }}.PairingCheckFixedQ( - []{{ .CurvePackage }}.G1Affine{totalG1Aff, negH}, - vk.Lines[:], - ) if err != nil { return err } diff --git a/internal/generator/kzg/template/kzg.test.go.tmpl b/internal/generator/kzg/template/kzg.test.go.tmpl index 45f6a35cf9..bc2dce9906 100644 --- a/internal/generator/kzg/template/kzg.test.go.tmpl +++ b/internal/generator/kzg/template/kzg.test.go.tmpl @@ -4,13 +4,14 @@ import ( "github.com/stretchr/testify/require" "math/big" "testing" + "bytes" "github.com/consensys/gnark-crypto/ecc" "github.com/consensys/gnark-crypto/ecc/{{ .Name }}" "github.com/consensys/gnark-crypto/ecc/{{ .Name }}/fr" "github.com/consensys/gnark-crypto/ecc/{{ .Name }}/fr/fft" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" ) // Test SRS re-used across tests of the KZG scheme @@ -140,10 +141,11 @@ func TestSerializationSRS(t *testing.T) { // create a SRS srs, err := NewSRS(64, new(big.Int).SetInt64(42)) assert.NoError(t, err) - t.Run("proving key round-trip", utils.SerializationRoundTrip(&srs.Pk)) - 
t.Run("proving key raw round-trip", utils.SerializationRoundTripRaw(&srs.Pk)) - t.Run("verifying key round-trip", utils.SerializationRoundTrip(&srs.Vk)) - t.Run("whole SRS round-trip", utils.SerializationRoundTrip(srs)) + t.Run("proving key round-trip", testutils.SerializationRoundTrip(&srs.Pk)) + t.Run("proving key raw round-trip", testutils.SerializationRoundTripRaw(&srs.Pk)) + t.Run("verifying key round-trip", testutils.SerializationRoundTrip(&srs.Vk)) + t.Run("whole SRS round-trip", testutils.SerializationRoundTrip(srs)) + t.Run("unsafe whole SRS round-trip", testutils.UnsafeBinaryMarshalerRoundTrip(srs)) } func TestCommit(t *testing.T) { @@ -414,7 +416,42 @@ func TestBatchVerifyMultiPoints(t *testing.T) { t.Fatal(err) } } +} + +func TestUnsafeToBytesTruncating(t *testing.T) { + assert := require.New(t) + srs, err := NewSRS(ecc.NextPowerOfTwo(1 << 10), big.NewInt(-1)) + assert.NoError(err) + + // marshal the SRS, but explicitly with less points. + var buf bytes.Buffer + err = srs.WriteDump(&buf, 1 << 9) + assert.NoError(err) + + r := bytes.NewReader(buf.Bytes()) + + // unmarshal the SRS + var newSRS SRS + err = newSRS.ReadDump(r) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 9 points + assert.Equal(1<<9, len(newSRS.Pk.G1)) + + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<9], newSRS.Pk.G1) + + // read even less points. 
+ var newSRSPartial SRS + r = bytes.NewReader(buf.Bytes()) + err = newSRSPartial.ReadDump(r, 1 << 8) + assert.NoError(err) + + // check that the SRS proving key has only 1 << 8 points + assert.Equal(1<<8, len(newSRSPartial.Pk.G1)) + // ensure they are equal to the original SRS + assert.Equal(srs.Pk.G1[:1<<8], newSRSPartial.Pk.G1) } const benchSize = 1 << 16 @@ -606,6 +643,90 @@ func BenchmarkToLagrangeG1(b *testing.B) { } } +func BenchmarkSerializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1 << 24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + // now we can benchmark the WriteTo, WriteRawTo and WriteDump methods + b.Run("WriteTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteRawTo", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + _, err := srs.WriteRawTo(&buf) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("WriteDump", func(b *testing.B) { + b.ResetTimer() + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + if err := srs.WriteDump(&buf); err != nil { + b.Fatal(err) + } + } + }) + +} + +func BenchmarkDeserializeSRS(b *testing.B) { + // let's create a quick SRS + srs, err := NewSRS(ecc.NextPowerOfTwo(1 << 24), big.NewInt(-1)) + if err != nil { + b.Fatal(err) + } + + b.Run("UnsafeReadFrom", func(b *testing.B) { + var buf bytes.Buffer + if _, err := srs.WriteRawTo(&buf); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS SRS + _, err := newSRS.UnsafeReadFrom(bytes.NewReader(buf.Bytes())) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("ReadDump", func(b *testing.B) { + var buf bytes.Buffer + err := srs.WriteDump(&buf) + if err != nil { + b.Fatal(err) + } + data := buf.Bytes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var newSRS 
SRS + if err := newSRS.ReadDump(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } + }) +} + func fillBenchBasesG1(samplePoints []{{ .CurvePackage }}.G1Affine) { var r big.Int diff --git a/internal/generator/kzg/template/marshal.go.tmpl b/internal/generator/kzg/template/marshal.go.tmpl index c72d4f7cb8..8798bbf329 100644 --- a/internal/generator/kzg/template/marshal.go.tmpl +++ b/internal/generator/kzg/template/marshal.go.tmpl @@ -2,6 +2,8 @@ import ( "io" "github.com/consensys/gnark-crypto/ecc/{{ .Name }}" + + "github.com/consensys/gnark-crypto/utils/unsafe" ) // WriteTo writes binary encoding of the ProvingKey @@ -72,6 +74,52 @@ func (vk *VerifyingKey) writeTo(w io.Writer, options ...func(*{{.CurvePackage}}. return enc.BytesWritten(), nil } +// WriteDump writes the binary encoding of the entire SRS memory representation +// It is meant to be use to achieve fast serialization/deserialization and +// is not compatible with WriteTo / ReadFrom. It does not do any validation +// and doesn't encode points in a canonical form. 
+// @unsafe: this is platform dependent and may not be compatible with other platforms +// @unstable: the format may change in the future +// If maxPkPoints is provided, the number of points in the ProvingKey will be limited to maxPkPoints +func (srs *SRS) WriteDump(w io.Writer, maxPkPoints ...int) error { + maxG1 := len(srs.Pk.G1) + if len(maxPkPoints) > 0 && maxPkPoints[0] < maxG1 && maxPkPoints[0] > 0{ + maxG1 = maxPkPoints[0] + } + // first we write the VerifyingKey; it is small so we re-use WriteTo + + if _, err := srs.Vk.writeTo(w, {{.CurvePackage}}.RawEncoding()); err != nil { + return err + } + + + // write the marker + if err := unsafe.WriteMarker(w); err != nil { + return err + } + + // write the slice + return unsafe.WriteSlice(w, srs.Pk.G1[:maxG1]) +} + +// ReadDump deserializes the SRS from a reader, as written by WriteDump +func (srs *SRS) ReadDump(r io.Reader, maxPkPoints ...int) error { + // first we read the VerifyingKey; it is small so we re-use ReadFrom + _, err := srs.Vk.ReadFrom(r) + if err != nil { + return err + } + + // read the marker + if err := unsafe.ReadMarker(r); err != nil { + return err + } + + // read the slice + srs.Pk.G1, _, err = unsafe.ReadSlice[[]{{.CurvePackage}}.G1Affine](r, maxPkPoints...) 
+ return err +} + // WriteTo writes binary encoding of the entire SRS func (srs *SRS) WriteTo(w io.Writer) (int64, error) { // encode the SRS diff --git a/internal/generator/pedersen/template/pedersen.go.tmpl b/internal/generator/pedersen/template/pedersen.go.tmpl index af1cdc8a58..db485881af 100644 --- a/internal/generator/pedersen/template/pedersen.go.tmpl +++ b/internal/generator/pedersen/template/pedersen.go.tmpl @@ -1,158 +1,158 @@ import ( - "crypto/rand" + "crypto/rand" "crypto/sha256" - "fmt" - "github.com/consensys/gnark-crypto/ecc" - curve "github.com/consensys/gnark-crypto/ecc/{{.Name}}" - "github.com/consensys/gnark-crypto/ecc/{{.Name}}/fr" + "fmt" + "github.com/consensys/gnark-crypto/ecc" + curve "github.com/consensys/gnark-crypto/ecc/{{.Name}}" + "github.com/consensys/gnark-crypto/ecc/{{.Name}}/fr" fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" "io" - "math/big" + "math/big" ) // ProvingKey for committing and proofs of knowledge type ProvingKey struct { - basis []curve.G1Affine - basisExpSigma []curve.G1Affine + Basis []curve.G1Affine + BasisExpSigma []curve.G1Affine } type VerifyingKey struct { - G curve.G2Affine // TODO @tabaie: does this really have to be randomized? - GRootSigmaNeg curve.G2Affine //gRootSigmaNeg = g^{-1/σ} + G curve.G2Affine // TODO @tabaie: does this really have to be randomized? + GRootSigmaNeg curve.G2Affine //gRootSigmaNeg = g^{-1/σ} } func randomFrSizedBytes() ([]byte, error) { - res := make([]byte, fr.Bytes) - _, err := rand.Read(res) - return res, err + res := make([]byte, fr.Bytes) + _, err := rand.Read(res) + return res, err } func randomOnG2() (curve.G2Affine, error) { // TODO: Add to G2.go? 
if gBytes, err := randomFrSizedBytes(); err != nil { - return curve.G2Affine{}, err + return curve.G2Affine{}, err } else { - return curve.HashToG2(gBytes, []byte("random on g2")) - } + return curve.HashToG2(gBytes, []byte("random on g2")) + } } func Setup(bases ...[]curve.G1Affine) (pk []ProvingKey, vk VerifyingKey, err error) { - if vk.G, err = randomOnG2(); err != nil { - return - } - - var modMinusOne big.Int - modMinusOne.Sub(fr.Modulus(), big.NewInt(1)) - var sigma *big.Int - if sigma, err = rand.Int(rand.Reader, &modMinusOne); err != nil { - return - } - sigma.Add(sigma, big.NewInt(1)) - - var sigmaInvNeg big.Int - sigmaInvNeg.ModInverse(sigma, fr.Modulus()) - sigmaInvNeg.Sub(fr.Modulus(), &sigmaInvNeg) - vk.GRootSigmaNeg.ScalarMultiplication(&vk.G, &sigmaInvNeg) - - pk = make([]ProvingKey, len(bases)) - for i := range bases { - pk[i].basisExpSigma = make([]curve.G1Affine, len(bases[i])) - for j := range bases[i] { - pk[i].basisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) - } - pk[i].basis = bases[i] - } - return + if vk.G, err = randomOnG2(); err != nil { + return + } + + var modMinusOne big.Int + modMinusOne.Sub(fr.Modulus(), big.NewInt(1)) + var sigma *big.Int + if sigma, err = rand.Int(rand.Reader, &modMinusOne); err != nil { + return + } + sigma.Add(sigma, big.NewInt(1)) + + var sigmaInvNeg big.Int + sigmaInvNeg.ModInverse(sigma, fr.Modulus()) + sigmaInvNeg.Sub(fr.Modulus(), &sigmaInvNeg) + vk.GRootSigmaNeg.ScalarMultiplication(&vk.G, &sigmaInvNeg) + + pk = make([]ProvingKey, len(bases)) + for i := range bases { + pk[i].BasisExpSigma = make([]curve.G1Affine, len(bases[i])) + for j := range bases[i] { + pk[i].BasisExpSigma[j].ScalarMultiplication(&bases[i][j], sigma) + } + pk[i].Basis = bases[i] + } + return } func (pk *ProvingKey) ProveKnowledge(values []fr.Element) (pok curve.G1Affine, err error) { - if len(values) != len(pk.basis) { - err = fmt.Errorf("must have as many values as basis elements") - return - } - - // TODO @gbotrel this will 
spawn more than one task, see - // https://github.com/ConsenSys/gnark-crypto/issues/269 - config := ecc.MultiExpConfig{ - NbTasks: 1, // TODO Experiment - } - - _, err = pok.MultiExp(pk.basisExpSigma, values, config) - return + if len(values) != len(pk.Basis) { + err = fmt.Errorf("must have as many values as basis elements") + return + } + + // TODO @gbotrel this will spawn more than one task, see + // https://github.com/ConsenSys/gnark-crypto/issues/269 + config := ecc.MultiExpConfig{ + NbTasks: 1, // TODO Experiment + } + + _, err = pok.MultiExp(pk.BasisExpSigma, values, config) + return } func (pk *ProvingKey) Commit(values []fr.Element) (commitment curve.G1Affine, err error) { - if len(values) != len(pk.basis) { - err = fmt.Errorf("must have as many values as basis elements") - return - } + if len(values) != len(pk.Basis) { + err = fmt.Errorf("must have as many values as basis elements") + return + } - // TODO @gbotrel this will spawn more than one task, see - // https://github.com/ConsenSys/gnark-crypto/issues/269 - config := ecc.MultiExpConfig{ - NbTasks: 1, - } - _, err = commitment.MultiExp(pk.basis, values, config) + // TODO @gbotrel this will spawn more than one task, see + // https://github.com/ConsenSys/gnark-crypto/issues/269 + config := ecc.MultiExpConfig{ + NbTasks: 1, + } + _, err = commitment.MultiExp(pk.Basis, values, config) - return + return } // BatchProve generates a single proof of knowledge for multiple commitments for faster verification func BatchProve(pk []ProvingKey, values [][]fr.Element, fiatshamirSeeds ...[]byte) (pok curve.G1Affine, err error) { - if len(pk) != len(values) { - err = fmt.Errorf("must have as many value vectors as bases") - return - } - - if len(pk) == 1 { // no need to fold - return pk[0].ProveKnowledge(values[0]) - } else if len(pk) == 0 { // nothing to do at all + if len(pk) != len(values) { + err = fmt.Errorf("must have as many value vectors as bases") + return + } + + if len(pk) == 1 { // no need to fold + return 
pk[0].ProveKnowledge(values[0]) + } else if len(pk) == 0 { // nothing to do at all + return + } + + offset := 0 + for i := range pk { + if len(values[i]) != len(pk[i].Basis) { + err = fmt.Errorf("must have as many values as basis elements") + return + } + offset += len(values[i]) + } + + var r fr.Element + if r, err = getChallenge(fiatshamirSeeds); err != nil { return - } - - offset := 0 - for i := range pk { - if len(values[i]) != len(pk[i].basis) { - err = fmt.Errorf("must have as many values as basis elements") - return - } - offset += len(values[i]) - } - - var r fr.Element - if r, err = getChallenge(fiatshamirSeeds); err != nil { - return - } - - // prepare one amalgamated MSM - scaledValues := make([]fr.Element, offset) - basis := make([]curve.G1Affine, offset) - - copy(basis, pk[0].basisExpSigma) - copy(scaledValues, values[0]) - - offset = len(values[0]) - rI := r - for i := 1; i < len(pk); i++ { - copy(basis[offset:], pk[i].basisExpSigma) - for j := range pk[i].basis { - scaledValues[offset].Mul(&values[i][j], &rI) - offset++ - } - if i+1 < len(pk) { - rI.Mul(&rI, &r) - } - } - - // TODO @gbotrel this will spawn more than one task, see - // https://github.com/ConsenSys/gnark-crypto/issues/269 - config := ecc.MultiExpConfig{ - NbTasks: 1, - } - - _, err = pok.MultiExp(basis, scaledValues, config) - return + } + + // prepare one amalgamated MSM + scaledValues := make([]fr.Element, offset) + basis := make([]curve.G1Affine, offset) + + copy(basis, pk[0].BasisExpSigma) + copy(scaledValues, values[0]) + + offset = len(values[0]) + rI := r + for i := 1; i < len(pk); i++ { + copy(basis[offset:], pk[i].BasisExpSigma) + for j := range pk[i].Basis { + scaledValues[offset].Mul(&values[i][j], &rI) + offset++ + } + if i+1 < len(pk) { + rI.Mul(&rI, &r) + } + } + + // TODO @gbotrel this will spawn more than one task, see + // https://github.com/ConsenSys/gnark-crypto/issues/269 + config := ecc.MultiExpConfig{ + NbTasks: 1, + } + + _, err = pok.MultiExp(basis, 
scaledValues, config) + return } // FoldCommitments amalgamates multiple commitments into one, which can be verifier against a folded proof obtained from BatchProve @@ -161,9 +161,9 @@ func FoldCommitments(commitments []curve.G1Affine, fiatshamirSeeds ...[]byte) (c if len(commitments) == 1 { // no need to fold commitment = commitments[0] return - } else if len(commitments) == 0 { // nothing to do at all + } else if len(commitments) == 0 { // nothing to do at all return - } + } r := make([]fr.Element, len(commitments)) r[0].SetOne() @@ -193,9 +193,9 @@ func FoldCommitments(commitments []curve.G1Affine, fiatshamirSeeds ...[]byte) (c // Verify checks if the proof of knowledge is valid func (vk *VerifyingKey) Verify(commitment curve.G1Affine, knowledgeProof curve.G1Affine) error { - if !commitment.IsInSubGroup() || !knowledgeProof.IsInSubGroup() { - return fmt.Errorf("subgroup check failed") - } + if !commitment.IsInSubGroup() || !knowledgeProof.IsInSubGroup() { + return fmt.Errorf("subgroup check failed") + } if isOne, err := curve.PairingCheck([]curve.G1Affine{commitment, knowledgeProof}, []curve.G2Affine{vk.G, vk.GRootSigmaNeg}); err != nil { return err @@ -206,32 +206,32 @@ func (vk *VerifyingKey) Verify(commitment curve.G1Affine, knowledgeProof curve.G } func getChallenge(fiatshamirSeeds [][]byte) (r fr.Element, err error) { - // incorporate user-provided seeds into the transcript - t := fiatshamir.NewTranscript(sha256.New(), "r") - for i := range fiatshamirSeeds { - if err = t.Bind("r", fiatshamirSeeds[i]); err != nil { - return - } - } - - // obtain the challenge - var rBytes []byte - - if rBytes, err = t.ComputeChallenge("r"); err != nil { - return - } - r.SetBytes(rBytes) // TODO @Tabaie Plonk challenge generation done the same way; replace both with hash to fr? 
- return + // incorporate user-provided seeds into the transcript + t := fiatshamir.NewTranscript(sha256.New(), "r") + for i := range fiatshamirSeeds { + if err = t.Bind("r", fiatshamirSeeds[i]); err != nil { + return + } + } + + // obtain the challenge + var rBytes []byte + + if rBytes, err = t.ComputeChallenge("r"); err != nil { + return + } + r.SetBytes(rBytes) // TODO @Tabaie Plonk challenge generation done the same way; replace both with hash to fr? + return } // Marshal func (pk *ProvingKey) writeTo(enc *curve.Encoder) (int64, error) { - if err := enc.Encode(pk.basis); err != nil { - return enc.BytesWritten(), err - } + if err := enc.Encode(pk.Basis); err != nil { + return enc.BytesWritten(), err + } - err := enc.Encode(pk.basisExpSigma) + err := enc.Encode(pk.BasisExpSigma) return enc.BytesWritten(), err } @@ -247,16 +247,16 @@ func (pk *ProvingKey) WriteRawTo(w io.Writer) (int64, error) { func (pk *ProvingKey) ReadFrom(r io.Reader) (int64, error) { dec := curve.NewDecoder(r) - if err := dec.Decode(&pk.basis); err != nil { + if err := dec.Decode(&pk.Basis); err != nil { + return dec.BytesRead(), err + } + if err := dec.Decode(&pk.BasisExpSigma); err != nil { return dec.BytesRead(), err } - if err := dec.Decode(&pk.basisExpSigma); err != nil { - return dec.BytesRead(), err - } - if cL, pL := len(pk.basis), len(pk.basisExpSigma); cL != pL { - return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) - } + if cL, pL := len(pk.Basis), len(pk.BasisExpSigma); cL != pL { + return dec.BytesRead(), fmt.Errorf("commitment basis size (%d) doesn't match proof basis size (%d)", cL, pL) + } return dec.BytesRead(), nil } diff --git a/internal/generator/pedersen/template/pedersen.test.go.tmpl b/internal/generator/pedersen/template/pedersen.test.go.tmpl index f8556b811a..dab3699f54 100644 --- a/internal/generator/pedersen/template/pedersen.test.go.tmpl +++ b/internal/generator/pedersen/template/pedersen.test.go.tmpl @@ -2,7 
+2,7 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/{{.Name}}" "github.com/consensys/gnark-crypto/ecc/{{.Name}}/fr" - "github.com/consensys/gnark-crypto/utils" + "github.com/consensys/gnark-crypto/utils/testutils" "github.com/stretchr/testify/assert" "testing" ) @@ -52,7 +52,7 @@ func testCommit(t *testing.T, values ...interface{}) { var ( pk []ProvingKey vk VerifyingKey - err error + err error commitment, pok curve.G1Affine ) valuesFr := interfaceSliceToFrSlice(t, values...) @@ -103,7 +103,7 @@ func TestFoldProofs(t *testing.T) { assert.Equal(t, pok, pokFolded) }) - t.Run("run empty", func (t *testing.T) { + t.Run("run empty", func(t *testing.T) { var foldedCommitment curve.G1Affine pok, err := BatchProve([]ProvingKey{}, [][]fr.Element{}, []byte("test")) assert.NoError(t, err) @@ -148,8 +148,8 @@ func TestCommitFiveElements(t *testing.T) { func TestMarshal(t *testing.T) { var pk ProvingKey - pk.basisExpSigma = randomG1Slice(t, 5) - pk.basis = randomG1Slice(t, 5) + pk.BasisExpSigma = randomG1Slice(t, 5) + pk.Basis = randomG1Slice(t, 5) var ( vk VerifyingKey @@ -160,8 +160,8 @@ func TestMarshal(t *testing.T) { vk.GRootSigmaNeg, err = randomOnG2() assert.NoError(t, err) - t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", utils.SerializationRoundTrip(&pk)) - t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&pk)) - t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", utils.SerializationRoundTrip(&vk)) - t.Run("VerifyingKey -> Bytes (raw) -> ProvingKey must remain identical.", utils.SerializationRoundTripRaw(&vk)) + t.Run("ProvingKey -> Bytes -> ProvingKey must remain identical.", testutils.SerializationRoundTrip(&pk)) + t.Run("ProvingKey -> Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&pk)) + t.Run("VerifyingKey -> Bytes -> VerifyingKey must remain identical.", testutils.SerializationRoundTrip(&vk)) + t.Run("VerifyingKey -> 
Bytes (raw) -> ProvingKey must remain identical.", testutils.SerializationRoundTripRaw(&vk)) } diff --git a/internal/generator/sis/template/fft.go.tmpl b/internal/generator/sis/template/fft.go.tmpl index 91938f81d0..fefbae06df 100644 --- a/internal/generator/sis/template/fft.go.tmpl +++ b/internal/generator/sis/template/fft.go.tmpl @@ -3,10 +3,10 @@ import ( "math/big" ) -// fft64 is generated by gnark-crypto and contains the unrolled code for FFT (DIF) on 64 elements +// FFT64 is generated by gnark-crypto and contains the unrolled code for FFT (DIF) on 64 elements // equivalent code: r.Domain.FFT(k, fft.DIF, fft.OnCoset(), fft.WithNbTasks(1)) -// twiddlesCoset must be pre-computed from twiddles and coset table, see precomputeTwiddlesCoset -func fft64(a []fr.Element, twiddlesCoset []fr.Element) { +// twiddlesCoset must be pre-computed from twiddles and coset table, see PrecomputeTwiddlesCoset +func FFT64(a []fr.Element, twiddlesCoset []fr.Element) { {{- /* notes: this function can be updated with larger n @@ -47,9 +47,9 @@ func fft64(a []fr.Element, twiddlesCoset []fr.Element) { {{- end}} } -// precomputeTwiddlesCoset precomputes twiddlesCoset from twiddles and coset table +// PrecomputeTwiddlesCoset precomputes twiddlesCoset from twiddles and coset table // it then return all elements in the correct order for the unrolled FFT. 
-func precomputeTwiddlesCoset(generator, shifter fr.Element) []fr.Element { +func PrecomputeTwiddlesCoset(generator, shifter fr.Element) []fr.Element { toReturn := make([]fr.Element, 63) var r, s fr.Element e := new(big.Int) diff --git a/kzg/kzg.go b/kzg/kzg.go index bab9896e80..134b11576d 100644 --- a/kzg/kzg.go +++ b/kzg/kzg.go @@ -22,11 +22,17 @@ import ( type Serializable interface { io.ReaderFrom io.WriterTo + BinaryDumper WriteRawTo(w io.Writer) (n int64, err error) UnsafeReadFrom(r io.Reader) (int64, error) } +type BinaryDumper interface { + WriteDump(w io.Writer, maxPkPoints ...int) error + ReadDump(r io.Reader, maxPkPoints ...int) error +} + type SRS Serializable // NewSRS returns an empty curved-typed SRS object diff --git a/utils/arith.go b/utils/arith.go index 521497da2b..d3562eadb7 100644 --- a/utils/arith.go +++ b/utils/arith.go @@ -1,28 +1,5 @@ package utils -// DivCeiling (a, b) = ⌈a/b⌉ -func DivCeiling(a, b uint) uint { - q := a / b - if q*b == a { - return q - } - return q + 1 -} - -func MinU(a, b uint) uint { - if a < b { - return a - } - return b -} - -func Min(a, b int) int { - if a < b { - return a - } - return b -} - func Max(a, b int) int { if a > b { return a diff --git a/utils/testing.go b/utils/testutils/testing.go similarity index 64% rename from utils/testing.go rename to utils/testutils/testing.go index 247ec63d5d..6af412d2be 100644 --- a/utils/testing.go +++ b/utils/testutils/testing.go @@ -1,11 +1,12 @@ -package utils +package testutils import ( "bytes" - "github.com/stretchr/testify/assert" "io" "reflect" "testing" + + "github.com/stretchr/testify/assert" ) type Serializable interface { @@ -17,6 +18,11 @@ type RawSerializable interface { WriteRawTo(io.Writer) (int64, error) } +type BinaryDumper interface { + WriteDump(w io.Writer, maxPkPoints ...int) error + ReadDump(r io.Reader, maxPkPoints ...int) error +} + func SerializationRoundTrip(o Serializable) func(*testing.T) { return func(t *testing.T) { // serialize it... 
@@ -50,3 +56,20 @@ func SerializationRoundTripRaw(o RawSerializable) func(*testing.T) { assert.Equal(t, o, _o) } } + +func UnsafeBinaryMarshalerRoundTrip(o BinaryDumper) func(*testing.T) { + return func(t *testing.T) { + // serialize it... + var buf bytes.Buffer + err := o.WriteDump(&buf) + assert.NoError(t, err) + + // reconstruct the object + _o := reflect.New(reflect.TypeOf(o).Elem()).Interface().(BinaryDumper) + err = _o.ReadDump(&buf) + assert.NoError(t, err) + + // compare + assert.Equal(t, o, _o) + } +} diff --git a/utils/unsafe/dump_slice.go b/utils/unsafe/dump_slice.go new file mode 100644 index 0000000000..7a14c61c13 --- /dev/null +++ b/utils/unsafe/dump_slice.go @@ -0,0 +1,102 @@ +package unsafe + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "unsafe" +) + +// WriteSlice writes a slice of arbitrary objects to the writer. +// Use with caution, as it writes the raw memory representation of the slice; +// in particular you do not want to use this with slices that contain pointers. +// This is architecture dependent and will not work across different architectures +// (e.g. 32 vs 64 bit, big endian vs little endian). +func WriteSlice[S ~[]E, E any](w io.Writer, s S) error { + var e E + size := int(unsafe.Sizeof(e)) + if err := binary.Write(w, binary.LittleEndian, uint64(len(s))); err != nil { + return err + } + + if len(s) == 0 { + return nil + } + + data := unsafe.Slice((*byte)(unsafe.Pointer(&s[0])), size*len(s)) + if _, err := w.Write(data); err != nil { + return err + } + return nil +} + +// ReadSlice reads a slice of arbitrary objects from the reader, written by WriteSlice. 
+func ReadSlice[S ~[]E, E any](r io.Reader, maxElements ...int) (s S, read int, err error) { + var buf [8]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + return nil, 0, err + } + read += 8 + + // decode length of the slice + length := binary.LittleEndian.Uint64(buf[:]) + + var e E + size := int(unsafe.Sizeof(e)) + limit := length + if len(maxElements) == 1 && maxElements[0] > 0 && int(length) > maxElements[0] { + limit = uint64(maxElements[0]) + } + + if limit == 0 { + return make(S, 0), read, nil + } + + toReturn := make(S, limit) + + // directly read the bytes from reader into the target memory area + // (slice data) + data := unsafe.Slice((*byte)(unsafe.Pointer(&toReturn[0])), size*int(limit)) + if _, err := io.ReadFull(r, data); err != nil { + return nil, read, err + } + + read += size * int(limit) + + // advance the reader if we had more elements than we wanted + if length > limit { + advance := int(length-limit) * size + if _, err := io.CopyN(io.Discard, r, int64(advance)); err != nil { + return nil, read, err + } + read += advance + } + + return toReturn, read, nil +} + +const marker uint64 = 0xdeadbeef + +// WriteMarker writes the raw memory representation of a fixed marker to the writer. +// This is used to ensure that the dump was written on the same architecture. +func WriteMarker(w io.Writer) error { + marker := marker + _, err := w.Write(unsafe.Slice((*byte)(unsafe.Pointer(&marker)), 8)) + return err +} + +// ReadMarker reads the raw memory representation of a fixed marker from the reader. +// This is used to ensure that the dump was written on the same architecture. 
+func ReadMarker(r io.Reader) error { + var buf [8]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + return err + } + marker := marker + d := unsafe.Slice((*byte)(unsafe.Pointer(&marker)), 8) + if !bytes.Equal(d, buf[:]) { + return errors.New("marker mismatch: dump was not written on the same architecture") + } + return nil +} diff --git a/utils/unsafe/dump_slice_test.go b/utils/unsafe/dump_slice_test.go new file mode 100644 index 0000000000..ee2286cec1 --- /dev/null +++ b/utils/unsafe/dump_slice_test.go @@ -0,0 +1,52 @@ +package unsafe_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bn254" + "github.com/consensys/gnark-crypto/utils/unsafe" + "github.com/stretchr/testify/require" +) + +func TestPointDump(t *testing.T) { + assert := require.New(t) + samplePoints := make([]bn254.G2Affine, 10) + fillBenchBasesG2(samplePoints) + + var buf bytes.Buffer + + err := unsafe.WriteSlice(&buf, samplePoints) + assert.NoError(err) + + readPoints, _, err := unsafe.ReadSlice[[]bn254.G2Affine](&buf) + assert.NoError(err) + + assert.Equal(samplePoints, readPoints) +} + +func TestMarker(t *testing.T) { + assert := require.New(t) + var buf bytes.Buffer + + err := unsafe.WriteMarker(&buf) + assert.NoError(err) + + err = unsafe.ReadMarker(&buf) + assert.NoError(err) +} + +func fillBenchBasesG2(samplePoints []bn254.G2Affine) { + var r big.Int + r.SetString("340444420969191673093399857471996460938405", 10) + samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + + one := samplePoints[0].X + one.SetOne() + + for i := 1; i < len(samplePoints); i++ { + samplePoints[i].X.Add(&samplePoints[i-1].X, &one) + samplePoints[i].Y.Sub(&samplePoints[i-1].Y, &one) + } +}