From 815f96f757b5b30804695db31e6fa546f8651eb6 Mon Sep 17 00:00:00 2001 From: Cory Benfield Date: Fri, 13 Dec 2024 13:54:13 +0000 Subject: [PATCH] Update BoringSSL to aefa5d24da34ef77ac797bdbe684734e5bd870f4 --- Package.swift | 2 +- .../crypto/asn1/posix_time.cc | 2 +- .../CCryptoBoringSSL/crypto/asn1/tasn_fre.cc | 30 +- Sources/CCryptoBoringSSL/crypto/bcm_support.h | 2 +- .../crypto/bio/socket_helper.cc | 2 +- .../CCryptoBoringSSL/crypto/blake2/blake2.cc | 2 +- .../crypto/bn_extra/bn_asn1.cc | 2 +- .../crypto/bytestring/asn1_compat.cc | 2 +- .../CCryptoBoringSSL/crypto/bytestring/ber.cc | 2 +- .../CCryptoBoringSSL/crypto/bytestring/cbb.cc | 2 +- .../CCryptoBoringSSL/crypto/bytestring/cbs.cc | 2 +- .../crypto/bytestring/internal.h | 2 +- .../crypto/bytestring/unicode.cc | 2 +- .../CCryptoBoringSSL/crypto/chacha/chacha.cc | 2 +- .../CCryptoBoringSSL/crypto/chacha/internal.h | 2 +- .../crypto/cipher_extra/e_aesctrhmac.cc | 2 +- .../crypto/cipher_extra/e_aesgcmsiv.cc | 2 +- .../crypto/cipher_extra/e_chacha20poly1305.cc | 2 +- .../crypto/cipher_extra/e_tls.cc | 2 +- .../CCryptoBoringSSL/crypto/conf/internal.h | 2 +- .../crypto/cpu_aarch64_apple.cc | 2 +- .../crypto/cpu_aarch64_fuchsia.cc | 2 +- .../crypto/cpu_aarch64_linux.cc | 2 +- .../crypto/cpu_aarch64_sysreg.cc | 2 +- .../crypto/cpu_aarch64_win.cc | 2 +- .../crypto/cpu_arm_freebsd.cc | 2 +- .../CCryptoBoringSSL/crypto/cpu_arm_linux.cc | 2 +- .../CCryptoBoringSSL/crypto/cpu_arm_linux.h | 2 +- Sources/CCryptoBoringSSL/crypto/cpu_intel.cc | 18 +- Sources/CCryptoBoringSSL/crypto/crypto.cc | 4 +- .../crypto/curve25519/asm/x25519-asm-arm.S | 2 +- .../crypto/curve25519/curve25519.cc | 2 +- .../crypto/curve25519/curve25519_64_adx.cc | 2 +- .../crypto/curve25519/curve25519_tables.h | 2 +- .../crypto/curve25519/internal.h | 2 +- .../crypto/curve25519/spake25519.cc | 2 +- .../crypto/dh_extra/dh_asn1.cc | 4 +- .../crypto/dilithium/dilithium.cc | 1538 ----------- .../crypto/dilithium/internal.h | 58 - 
.../CCryptoBoringSSL/crypto/dsa/dsa_asn1.cc | 4 +- .../CCryptoBoringSSL/crypto/dsa/internal.h | 2 +- .../crypto/ec_extra/ec_derive.cc | 2 +- .../crypto/ec_extra/hash_to_curve.cc | 2 +- .../crypto/ec_extra/internal.h | 2 +- .../CCryptoBoringSSL/crypto/engine/engine.cc | 2 +- Sources/CCryptoBoringSSL/crypto/err/err.cc | 1 - .../CCryptoBoringSSL/crypto/err/internal.h | 2 +- Sources/CCryptoBoringSSL/crypto/evp/p_ec.cc | 4 +- .../CCryptoBoringSSL/crypto/evp/p_ec_asn1.cc | 4 +- .../CCryptoBoringSSL/crypto/evp/p_ed25519.cc | 2 +- .../crypto/evp/p_ed25519_asn1.cc | 2 +- Sources/CCryptoBoringSSL/crypto/evp/p_hkdf.cc | 2 +- Sources/CCryptoBoringSSL/crypto/evp/p_rsa.cc | 4 +- .../CCryptoBoringSSL/crypto/evp/p_rsa_asn1.cc | 4 +- .../CCryptoBoringSSL/crypto/evp/p_x25519.cc | 2 +- .../crypto/evp/p_x25519_asn1.cc | 2 +- Sources/CCryptoBoringSSL/crypto/evp/pbkdf.cc | 4 +- Sources/CCryptoBoringSSL/crypto/evp/scrypt.cc | 2 +- .../crypto/fipsmodule/aes/aes_nohw.cc.inc | 2 +- .../crypto/fipsmodule/aes/internal.h | 2 +- .../CCryptoBoringSSL/crypto/fipsmodule/bcm.cc | 4 +- .../crypto/fipsmodule/bcm_interface.h | 222 +- .../crypto/fipsmodule/bn/div_extra.cc.inc | 2 +- .../crypto/fipsmodule/bn/gcd_extra.cc.inc | 2 +- .../crypto/fipsmodule/cipher/aead.cc.inc | 2 +- .../crypto/fipsmodule/delocate.h | 2 +- .../crypto/fipsmodule/dh/internal.h | 2 +- .../crypto/fipsmodule/digest/digest.cc.inc | 13 +- .../fipsmodule/digestsign/digestsign.cc.inc | 4 +- .../crypto/fipsmodule/ec/builtin_curves.h | 2 +- .../crypto/fipsmodule/ec/felem.cc.inc | 2 +- .../crypto/fipsmodule/ec/p224-64.cc.inc | 2 +- .../crypto/fipsmodule/ec/p256.cc.inc | 2 +- .../crypto/fipsmodule/ec/p256_table.h | 2 +- .../crypto/fipsmodule/ec/scalar.cc.inc | 2 +- .../crypto/fipsmodule/ec/simple_mul.cc.inc | 2 +- .../crypto/fipsmodule/ec/util.cc.inc | 2 +- .../crypto/fipsmodule/ecdsa/internal.h | 2 +- .../crypto/fipsmodule/fips_shared_support.cc | 2 +- .../crypto/fipsmodule/hkdf/hkdf.cc.inc | 2 +- .../crypto/{ => 
fipsmodule}/keccak/internal.h | 2 +- .../keccak/keccak.cc.inc} | 4 +- .../crypto/fipsmodule/mldsa/mldsa.cc.inc | 2031 +++++++++++++++ .../crypto/fipsmodule/modes/gcm.cc.inc | 90 +- .../crypto/fipsmodule/modes/gcm_nohw.cc.inc | 2 +- .../crypto/fipsmodule/modes/internal.h | 86 +- .../crypto/fipsmodule/modes/polyval.cc.inc | 5 +- .../crypto/fipsmodule/rand/ctrdrbg.cc.inc | 2 +- .../crypto/fipsmodule/rand/internal.h | 2 +- .../crypto/fipsmodule/rand/rand.cc.inc | 16 +- .../crypto/fipsmodule/rsa/padding.cc.inc | 4 +- .../crypto/fipsmodule/self_check/fips.cc.inc | 2 +- .../fipsmodule/self_check/self_check.cc.inc | 2 +- .../service_indicator.cc.inc | 2 +- .../crypto/fipsmodule/sha/internal.h | 2 +- .../crypto/fipsmodule/sha/sha1.cc.inc | 10 +- .../crypto/fipsmodule/sha/sha256.cc.inc | 14 +- .../crypto/fipsmodule/sha/sha512.cc.inc | 20 +- .../crypto/fipsmodule/tls/internal.h | 2 +- Sources/CCryptoBoringSSL/crypto/hpke/hpke.cc | 2 +- Sources/CCryptoBoringSSL/crypto/hrss/hrss.cc | 2 +- .../CCryptoBoringSSL/crypto/hrss/internal.h | 2 +- Sources/CCryptoBoringSSL/crypto/internal.h | 104 +- .../CCryptoBoringSSL/crypto/kyber/internal.h | 2 +- .../CCryptoBoringSSL/crypto/kyber/kyber.cc | 4 +- Sources/CCryptoBoringSSL/crypto/md4/md4.cc | 8 +- .../CCryptoBoringSSL/crypto/md5/internal.h | 2 +- .../CCryptoBoringSSL/crypto/mldsa/internal.h | 76 - .../CCryptoBoringSSL/crypto/mldsa/mldsa.cc | 1778 +------------ .../CCryptoBoringSSL/crypto/mlkem/internal.h | 2 +- .../CCryptoBoringSSL/crypto/mlkem/mlkem.cc | 18 +- .../CCryptoBoringSSL/crypto/pkcs7/internal.h | 2 +- .../CCryptoBoringSSL/crypto/pkcs7/pkcs7.cc | 2 +- .../crypto/pkcs7/pkcs7_x509.cc | 2 +- .../CCryptoBoringSSL/crypto/pkcs8/internal.h | 4 +- .../CCryptoBoringSSL/crypto/pkcs8/p5_pbev2.cc | 4 +- .../CCryptoBoringSSL/crypto/pkcs8/pkcs8.cc | 4 +- .../crypto/pkcs8/pkcs8_x509.cc | 4 +- .../crypto/poly1305/internal.h | 2 +- .../crypto/poly1305/poly1305.cc | 2 +- .../crypto/poly1305/poly1305_arm.cc | 2 +- 
.../crypto/poly1305/poly1305_vec.cc | 2 +- .../CCryptoBoringSSL/crypto/pool/internal.h | 2 +- Sources/CCryptoBoringSSL/crypto/pool/pool.cc | 2 +- .../crypto/rand_extra/deterministic.cc | 2 +- .../crypto/rand_extra/fork_detect.cc | 2 +- .../crypto/rand_extra/forkunsafe.cc | 2 +- .../crypto/rand_extra/getentropy.cc | 2 +- .../crypto/rand_extra/getrandom_fillin.h | 2 +- .../CCryptoBoringSSL/crypto/rand_extra/ios.cc | 2 +- .../crypto/rand_extra/passive.cc | 2 +- .../crypto/rand_extra/rand_extra.cc | 2 +- .../crypto/rand_extra/sysrand_internal.h | 2 +- .../crypto/rand_extra/trusty.cc | 2 +- .../crypto/rand_extra/urandom.cc | 2 +- .../crypto/rand_extra/windows.cc | 2 +- Sources/CCryptoBoringSSL/crypto/refcount.cc | 2 +- .../crypto/rsa_extra/rsa_asn1.cc | 4 +- .../crypto/rsa_extra/rsa_extra.cc | 2 +- Sources/CCryptoBoringSSL/crypto/sha/sha1.cc | 2 +- Sources/CCryptoBoringSSL/crypto/sha/sha256.cc | 2 +- Sources/CCryptoBoringSSL/crypto/sha/sha512.cc | 2 +- .../crypto/siphash/siphash.cc | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/address.h | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/fors.cc | 2 +- Sources/CCryptoBoringSSL/crypto/slhdsa/fors.h | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/internal.h | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/merkle.cc | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/merkle.h | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/params.h | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/slhdsa.cc | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/thash.cc | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/thash.h | 2 +- .../CCryptoBoringSSL/crypto/slhdsa/wots.cc | 2 +- Sources/CCryptoBoringSSL/crypto/slhdsa/wots.h | 2 +- Sources/CCryptoBoringSSL/crypto/spx/spx.cc | 140 - .../crypto/spx/spx_address.cc | 101 - .../CCryptoBoringSSL/crypto/spx/spx_address.h | 50 - .../CCryptoBoringSSL/crypto/spx/spx_fors.cc | 133 - .../CCryptoBoringSSL/crypto/spx/spx_fors.h | 54 - .../CCryptoBoringSSL/crypto/spx/spx_merkle.cc | 150 -- .../CCryptoBoringSSL/crypto/spx/spx_merkle.h | 61 - 
.../CCryptoBoringSSL/crypto/spx/spx_params.h | 71 - .../CCryptoBoringSSL/crypto/spx/spx_thash.cc | 136 - .../CCryptoBoringSSL/crypto/spx/spx_thash.h | 70 - .../CCryptoBoringSSL/crypto/spx/spx_util.cc | 53 - .../CCryptoBoringSSL/crypto/spx/spx_util.h | 44 - .../CCryptoBoringSSL/crypto/spx/spx_wots.cc | 135 - .../CCryptoBoringSSL/crypto/spx/spx_wots.h | 45 - .../CCryptoBoringSSL/crypto/thread_none.cc | 2 +- .../CCryptoBoringSSL/crypto/thread_pthread.cc | 2 +- Sources/CCryptoBoringSSL/crypto/thread_win.cc | 2 +- .../crypto/trust_token/internal.h | 2 +- .../crypto/trust_token/pmbtoken.cc | 2 +- .../crypto/trust_token/trust_token.cc | 2 +- .../crypto/trust_token/voprf.cc | 2 +- .../CCryptoBoringSSL/crypto/x509/algorithm.cc | 2 +- .../CCryptoBoringSSL/crypto/x509/policy.cc | 2 +- .../CCryptoBoringSSL/crypto/x509/rsa_pss.cc | 4 +- .../CCryptoBoringSSL/crypto/x509/v3_alt.cc | 3 +- .../CCryptoBoringSSL/crypto/x509/v3_ncons.cc | 3 +- .../CCryptoBoringSSL/crypto/x509/v3_pcons.cc | 3 +- .../CCryptoBoringSSL/crypto/x509/v3_pmaps.cc | 3 +- .../CCryptoBoringSSL/crypto/x509/v3_utl.cc | 3 +- .../gen/bcm/aes-gcm-avx10-x86_64-apple.S | 2269 ++++++++++++++++ .../gen/bcm/aes-gcm-avx10-x86_64-linux.S | 2279 +++++++++++++++++ .../CCryptoBoringSSL/gen/crypto/err_data.cc | 2 +- Sources/CCryptoBoringSSL/hash.txt | 2 +- .../include/CCryptoBoringSSL_aead.h | 2 +- .../include/CCryptoBoringSSL_asm_base.h | 2 +- .../include/CCryptoBoringSSL_asn1_mac.h | 2 +- .../include/CCryptoBoringSSL_asn1t.h | 4 +- .../include/CCryptoBoringSSL_bcm_public.h | 2 +- .../include/CCryptoBoringSSL_blake2.h | 2 +- ...CryptoBoringSSL_boringssl_prefix_symbols.h | 109 +- ...toBoringSSL_boringssl_prefix_symbols_asm.h | 109 +- .../include/CCryptoBoringSSL_buffer.h | 2 +- .../include/CCryptoBoringSSL_bytestring.h | 2 +- .../include/CCryptoBoringSSL_chacha.h | 2 +- .../include/CCryptoBoringSSL_cmac.h | 2 +- .../include/CCryptoBoringSSL_cpu.h | 2 +- .../include/CCryptoBoringSSL_crypto.h | 2 +- 
.../include/CCryptoBoringSSL_ctrdrbg.h | 2 +- .../include/CCryptoBoringSSL_curve25519.h | 2 +- .../include/CCryptoBoringSSL_digest.h | 7 +- .../include/CCryptoBoringSSL_e_os2.h | 2 +- .../include/CCryptoBoringSSL_engine.h | 2 +- .../include/CCryptoBoringSSL_hkdf.h | 2 +- .../include/CCryptoBoringSSL_hpke.h | 2 +- .../include/CCryptoBoringSSL_hrss.h | 2 +- .../include/CCryptoBoringSSL_is_boringssl.h | 2 +- .../include/CCryptoBoringSSL_kdf.h | 2 +- .../include/CCryptoBoringSSL_mldsa.h | 2 +- .../include/CCryptoBoringSSL_mlkem.h | 2 +- .../include/CCryptoBoringSSL_obj_mac.h | 2 +- .../include/CCryptoBoringSSL_objects.h | 2 +- .../include/CCryptoBoringSSL_opensslconf.h | 2 +- .../include/CCryptoBoringSSL_opensslv.h | 2 +- .../include/CCryptoBoringSSL_ossl_typ.h | 2 +- .../include/CCryptoBoringSSL_pkcs12.h | 2 +- .../include/CCryptoBoringSSL_pkcs7.h | 2 +- .../include/CCryptoBoringSSL_pkcs8.h | 4 +- .../include/CCryptoBoringSSL_poly1305.h | 2 +- .../include/CCryptoBoringSSL_pool.h | 2 +- .../include/CCryptoBoringSSL_posix_time.h | 2 +- .../include/CCryptoBoringSSL_rand.h | 2 +- .../include/CCryptoBoringSSL_safestack.h | 2 +- .../include/CCryptoBoringSSL_siphash.h | 2 +- .../include/CCryptoBoringSSL_slhdsa.h | 2 +- .../include/CCryptoBoringSSL_span.h | 2 +- .../include/CCryptoBoringSSL_target.h | 2 +- .../include/CCryptoBoringSSL_time.h | 2 +- .../include/CCryptoBoringSSL_trust_token.h | 2 +- .../include/CCryptoBoringSSL_x509_vfy.h | 2 +- .../include/CCryptoBoringSSL_x509v3.h | 2 +- .../include/CCryptoBoringSSL_x509v3_errors.h | 4 +- .../include/boringssl_prefix_symbols_nasm.inc | 216 +- .../experimental/CCryptoBoringSSL_dilithium.h | 129 - .../experimental/CCryptoBoringSSL_kyber.h | 2 +- .../experimental/CCryptoBoringSSL_spx.h | 90 - 240 files changed, 7464 insertions(+), 5568 deletions(-) delete mode 100644 Sources/CCryptoBoringSSL/crypto/dilithium/dilithium.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/dilithium/internal.h rename 
Sources/CCryptoBoringSSL/crypto/{ => fipsmodule}/keccak/internal.h (98%) rename Sources/CCryptoBoringSSL/crypto/{keccak/keccak.cc => fipsmodule/keccak/keccak.cc.inc} (99%) create mode 100644 Sources/CCryptoBoringSSL/crypto/fipsmodule/mldsa/mldsa.cc.inc delete mode 100644 Sources/CCryptoBoringSSL/crypto/mldsa/internal.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_address.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_address.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_fors.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_fors.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_params.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_thash.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_thash.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_util.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_util.h delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_wots.cc delete mode 100644 Sources/CCryptoBoringSSL/crypto/spx/spx_wots.h create mode 100644 Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-apple.S create mode 100644 Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-linux.S delete mode 100644 Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_dilithium.h delete mode 100644 Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_spx.h diff --git a/Package.swift b/Package.swift index 070dfbe2..04f0df2e 100644 --- a/Package.swift +++ b/Package.swift @@ -20,7 +20,7 @@ // Sources/CCryptoBoringSSL directory. The source repository is at // https://boringssl.googlesource.com/boringssl. 
// -// BoringSSL Commit: fcef13a49852397a0d39c00be8d7bc2ba1ab6fb9 +// BoringSSL Commit: aefa5d24da34ef77ac797bdbe684734e5bd870f4 import PackageDescription diff --git a/Sources/CCryptoBoringSSL/crypto/asn1/posix_time.cc b/Sources/CCryptoBoringSSL/crypto/asn1/posix_time.cc index 22163921..89257aae 100644 --- a/Sources/CCryptoBoringSSL/crypto/asn1/posix_time.cc +++ b/Sources/CCryptoBoringSSL/crypto/asn1/posix_time.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. +/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/asn1/tasn_fre.cc b/Sources/CCryptoBoringSSL/crypto/asn1/tasn_fre.cc index 147503d7..b9736990 100644 --- a/Sources/CCryptoBoringSSL/crypto/asn1/tasn_fre.cc +++ b/Sources/CCryptoBoringSSL/crypto/asn1/tasn_fre.cc @@ -70,9 +70,6 @@ void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it) { } void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { - const ASN1_TEMPLATE *tt = NULL, *seqtt; - const ASN1_EXTERN_FUNCS *ef; - int i; if (!pval) { return; } @@ -97,16 +94,14 @@ void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { const ASN1_AUX *aux = reinterpret_cast(it->funcs); ASN1_aux_cb *asn1_cb = aux != NULL ? 
aux->asn1_cb : NULL; if (asn1_cb) { - i = asn1_cb(ASN1_OP_FREE_PRE, pval, it, NULL); - if (i == 2) { + if (asn1_cb(ASN1_OP_FREE_PRE, pval, it, NULL) == 2) { return; } } - i = asn1_get_choice_selector(pval, it); + int i = asn1_get_choice_selector(pval, it); if ((i >= 0) && (i < it->tcount)) { - ASN1_VALUE **pchval; - tt = it->templates + i; - pchval = asn1_get_field_ptr(pval, tt); + const ASN1_TEMPLATE *tt = it->templates + i; + ASN1_VALUE **pchval = asn1_get_field_ptr(pval, tt); ASN1_template_free(pchval, tt); } if (asn1_cb) { @@ -117,12 +112,14 @@ void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { break; } - case ASN1_ITYPE_EXTERN: - ef = reinterpret_cast(it->funcs); + case ASN1_ITYPE_EXTERN: { + const ASN1_EXTERN_FUNCS *ef = + reinterpret_cast(it->funcs); if (ef && ef->asn1_ex_free) { ef->asn1_ex_free(pval, it); } break; + } case ASN1_ITYPE_SEQUENCE: { if (!asn1_refcount_dec_and_test_zero(pval, it)) { @@ -131,8 +128,7 @@ void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { const ASN1_AUX *aux = reinterpret_cast(it->funcs); ASN1_aux_cb *asn1_cb = aux != NULL ? aux->asn1_cb : NULL; if (asn1_cb) { - i = asn1_cb(ASN1_OP_FREE_PRE, pval, it, NULL); - if (i == 2) { + if (asn1_cb(ASN1_OP_FREE_PRE, pval, it, NULL) == 2) { return; } } @@ -140,14 +136,12 @@ void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { // If we free up as normal we will invalidate any ANY DEFINED BY // field and we wont be able to determine the type of the field it // defines. So free up in reverse order. 
- tt = it->templates + it->tcount - 1; - for (i = 0; i < it->tcount; tt--, i++) { - ASN1_VALUE **pseqval; - seqtt = asn1_do_adb(pval, tt, 0); + for (int i = it->tcount - 1; i >= 0; i--) { + const ASN1_TEMPLATE *seqtt = asn1_do_adb(pval, &it->templates[i], 0); if (!seqtt) { continue; } - pseqval = asn1_get_field_ptr(pval, seqtt); + ASN1_VALUE **pseqval = asn1_get_field_ptr(pval, seqtt); ASN1_template_free(pseqval, seqtt); } if (asn1_cb) { diff --git a/Sources/CCryptoBoringSSL/crypto/bcm_support.h b/Sources/CCryptoBoringSSL/crypto/bcm_support.h index fe5d712a..02fd5656 100644 --- a/Sources/CCryptoBoringSSL/crypto/bcm_support.h +++ b/Sources/CCryptoBoringSSL/crypto/bcm_support.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bio/socket_helper.cc b/Sources/CCryptoBoringSSL/crypto/bio/socket_helper.cc index 0df3b737..0354e285 100644 --- a/Sources/CCryptoBoringSSL/crypto/bio/socket_helper.cc +++ b/Sources/CCryptoBoringSSL/crypto/bio/socket_helper.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/blake2/blake2.cc b/Sources/CCryptoBoringSSL/crypto/blake2/blake2.cc index 691fc827..9e4302c2 100644 --- a/Sources/CCryptoBoringSSL/crypto/blake2/blake2.cc +++ b/Sources/CCryptoBoringSSL/crypto/blake2/blake2.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Google Inc. 
+/* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bn_extra/bn_asn1.cc b/Sources/CCryptoBoringSSL/crypto/bn_extra/bn_asn1.cc index 4fd28390..a4e47a8b 100644 --- a/Sources/CCryptoBoringSSL/crypto/bn_extra/bn_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/bn_extra/bn_asn1.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bytestring/asn1_compat.cc b/Sources/CCryptoBoringSSL/crypto/bytestring/asn1_compat.cc index e4525181..a221f1ac 100644 --- a/Sources/CCryptoBoringSSL/crypto/bytestring/asn1_compat.cc +++ b/Sources/CCryptoBoringSSL/crypto/bytestring/asn1_compat.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bytestring/ber.cc b/Sources/CCryptoBoringSSL/crypto/bytestring/ber.cc index 35f88131..262f7f60 100644 --- a/Sources/CCryptoBoringSSL/crypto/bytestring/ber.cc +++ b/Sources/CCryptoBoringSSL/crypto/bytestring/ber.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bytestring/cbb.cc b/Sources/CCryptoBoringSSL/crypto/bytestring/cbb.cc index cb54b9ca..b22f506b 100644 --- a/Sources/CCryptoBoringSSL/crypto/bytestring/cbb.cc +++ b/Sources/CCryptoBoringSSL/crypto/bytestring/cbb.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bytestring/cbs.cc b/Sources/CCryptoBoringSSL/crypto/bytestring/cbs.cc index 3101bacb..cf3994c9 100644 --- a/Sources/CCryptoBoringSSL/crypto/bytestring/cbs.cc +++ b/Sources/CCryptoBoringSSL/crypto/bytestring/cbs.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bytestring/internal.h b/Sources/CCryptoBoringSSL/crypto/bytestring/internal.h index 4f07deaf..878bad4e 100644 --- a/Sources/CCryptoBoringSSL/crypto/bytestring/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/bytestring/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/bytestring/unicode.cc b/Sources/CCryptoBoringSSL/crypto/bytestring/unicode.cc index 4f990aef..292ac940 100644 --- a/Sources/CCryptoBoringSSL/crypto/bytestring/unicode.cc +++ b/Sources/CCryptoBoringSSL/crypto/bytestring/unicode.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/chacha/chacha.cc b/Sources/CCryptoBoringSSL/crypto/chacha/chacha.cc index f60b91b1..e2d502e8 100644 --- a/Sources/CCryptoBoringSSL/crypto/chacha/chacha.cc +++ b/Sources/CCryptoBoringSSL/crypto/chacha/chacha.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/chacha/internal.h b/Sources/CCryptoBoringSSL/crypto/chacha/internal.h index e5626474..a00d6c85 100644 --- a/Sources/CCryptoBoringSSL/crypto/chacha/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/chacha/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. 
+/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesctrhmac.cc b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesctrhmac.cc index 999de8f3..6e39fc4a 100644 --- a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesctrhmac.cc +++ b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesctrhmac.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesgcmsiv.cc b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesgcmsiv.cc index 9ef2b933..526db54c 100644 --- a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesgcmsiv.cc +++ b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_aesgcmsiv.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_chacha20poly1305.cc b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_chacha20poly1305.cc index b839380a..26bd1b40 100644 --- a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_chacha20poly1305.cc +++ b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_chacha20poly1305.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_tls.cc b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_tls.cc index 5dc66a9d..5d888bd8 100644 --- a/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_tls.cc +++ b/Sources/CCryptoBoringSSL/crypto/cipher_extra/e_tls.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/conf/internal.h b/Sources/CCryptoBoringSSL/crypto/conf/internal.h index 5eeab683..4fb75910 100644 --- a/Sources/CCryptoBoringSSL/crypto/conf/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/conf/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_apple.cc b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_apple.cc index 6e90c4c3..6e46a5b2 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_apple.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_apple.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Google Inc. 
+/* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_fuchsia.cc b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_fuchsia.cc index 36709b06..014a4ab5 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_fuchsia.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_fuchsia.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_linux.cc b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_linux.cc index 388a032b..6a11b6e7 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_linux.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_linux.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_sysreg.cc b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_sysreg.cc index 4d04f778..5ece6f20 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_sysreg.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_sysreg.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. 
+/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_win.cc b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_win.cc index 3d9fc0f1..a681def2 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_win.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_aarch64_win.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * Copyright (c) 2020, Arm Ltd. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_arm_freebsd.cc b/Sources/CCryptoBoringSSL/crypto/cpu_arm_freebsd.cc index c4504669..880e5038 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_arm_freebsd.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_arm_freebsd.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. +/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.cc b/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.cc index 93d26347..6d0750e9 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.h b/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.h index c7328dc2..cdd683ee 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.h +++ b/Sources/CCryptoBoringSSL/crypto/cpu_arm_linux.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. 
+/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/cpu_intel.cc b/Sources/CCryptoBoringSSL/crypto/cpu_intel.cc index a0cf3826..317e73fa 100644 --- a/Sources/CCryptoBoringSSL/crypto/cpu_intel.cc +++ b/Sources/CCryptoBoringSSL/crypto/cpu_intel.cc @@ -117,6 +117,22 @@ static uint64_t OPENSSL_xgetbv(uint32_t xcr) { #endif } +static bool os_supports_avx512(uint64_t xcr0) { +#if defined(__APPLE__) + // The Darwin kernel had a bug where it could corrupt the opmask registers. + // See + // https://community.intel.com/t5/Software-Tuning-Performance/MacOS-Darwin-kernel-bug-clobbers-AVX-512-opmask-register-state/m-p/1327259 + // Darwin also does not initially set the XCR0 bits for AVX512, but they are + // set if the thread tries to use AVX512 anyway. Thus, to safely and + // consistently use AVX512 on macOS we'd need to check the kernel version as + // well as detect AVX512 support using a macOS-specific method. We don't + // bother with this, especially given Apple's transition to arm64. + return false; +#else + return (xcr0 & 0xe6) == 0xe6; +#endif +} + // handle_cpu_env applies the value from |in| to the CPUID values in |out[0]| // and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. static void handle_cpu_env(uint32_t *out, const char *in) { @@ -234,7 +250,7 @@ void OPENSSL_cpuid_setup(void) { // See Intel manual, volume 1, sections 15.2 ("Detection of AVX-512 Foundation // Instructions") through 15.4 ("Detection of Intel AVX-512 Instruction Groups // Operating at 256 and 128-bit Vector Lengths"). - if ((xcr0 & 0xe6) != 0xe6) { + if (!os_supports_avx512(xcr0)) { // Without XCR0.111xx11x, no AVX512 feature can be used. This includes ZMM // registers, masking, SIMD registers 16-31 (even if accessed as YMM or // XMM), and EVEX-coded instructions (even on YMM or XMM). 
Even if only diff --git a/Sources/CCryptoBoringSSL/crypto/crypto.cc b/Sources/CCryptoBoringSSL/crypto/crypto.cc index b147f44a..788a1477 100644 --- a/Sources/CCryptoBoringSSL/crypto/crypto.cc +++ b/Sources/CCryptoBoringSSL/crypto/crypto.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -54,7 +54,7 @@ static_assert(sizeof(ossl_ssize_t) == sizeof(size_t), // archive, linking on OS X will fail to resolve common symbols. By // initialising it to zero, it becomes a "data symbol", which isn't so // affected. -HIDDEN uint8_t BORINGSSL_function_hit[7] = {0}; +HIDDEN uint8_t BORINGSSL_function_hit[8] = {0}; #endif #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) diff --git a/Sources/CCryptoBoringSSL/crypto/curve25519/asm/x25519-asm-arm.S b/Sources/CCryptoBoringSSL/crypto/curve25519/asm/x25519-asm-arm.S index 6406ab74..e7e05dc2 100644 --- a/Sources/CCryptoBoringSSL/crypto/curve25519/asm/x25519-asm-arm.S +++ b/Sources/CCryptoBoringSSL/crypto/curve25519/asm/x25519-asm-arm.S @@ -1,6 +1,6 @@ #define BORINGSSL_PREFIX CCryptoBoringSSL #if defined(__arm__) && defined(__linux__) -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519.cc b/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519.cc index 155ab99e..8f4dd7e9 100644 --- a/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519.cc +++ b/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. 
+/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_64_adx.cc b/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_64_adx.cc index 27689896..63e50dc2 100644 --- a/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_64_adx.cc +++ b/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_64_adx.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_tables.h b/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_tables.h index 6636a36a..f459ed33 100644 --- a/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_tables.h +++ b/Sources/CCryptoBoringSSL/crypto/curve25519/curve25519_tables.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/curve25519/internal.h b/Sources/CCryptoBoringSSL/crypto/curve25519/internal.h index 6893c22b..402aaddc 100644 --- a/Sources/CCryptoBoringSSL/crypto/curve25519/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/curve25519/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. 
+/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/curve25519/spake25519.cc b/Sources/CCryptoBoringSSL/crypto/curve25519/spake25519.cc index 00c9f608..b4666a7d 100644 --- a/Sources/CCryptoBoringSSL/crypto/curve25519/spake25519.cc +++ b/Sources/CCryptoBoringSSL/crypto/curve25519/spake25519.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/dh_extra/dh_asn1.cc b/Sources/CCryptoBoringSSL/crypto/dh_extra/dh_asn1.cc index 2444d844..b76c2fa1 100644 --- a/Sources/CCryptoBoringSSL/crypto/dh_extra/dh_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/dh_extra/dh_asn1.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2000. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2000. */ /* ==================================================================== * Copyright (c) 2000-2005 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/dilithium/dilithium.cc b/Sources/CCryptoBoringSSL/crypto/dilithium/dilithium.cc deleted file mode 100644 index 72f7e476..00000000 --- a/Sources/CCryptoBoringSSL/crypto/dilithium/dilithium.cc +++ /dev/null @@ -1,1538 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#define OPENSSL_UNSTABLE_EXPERIMENTAL_DILITHIUM -#include - -#include -#include - -#include -#include - -#include "../internal.h" -#include "../keccak/internal.h" -#include "./internal.h" - -#define DEGREE 256 -#define K 6 -#define L 5 -#define ETA 4 -#define TAU 49 -#define BETA 196 -#define OMEGA 55 - -#define RHO_BYTES 32 -#define SIGMA_BYTES 64 -#define K_BYTES 32 -#define TR_BYTES 64 -#define MU_BYTES 64 -#define RHO_PRIME_BYTES 64 -#define LAMBDA_BITS 192 -#define LAMBDA_BYTES (LAMBDA_BITS / 8) - -// 2^23 - 2^13 + 1 -static const uint32_t kPrime = 8380417; -// Inverse of -kPrime modulo 2^32 -static const uint32_t kPrimeNegInverse = 4236238847; -static const int kDroppedBits = 13; -static const uint32_t kHalfPrime = (8380417 - 1) / 2; -static const uint32_t kGamma1 = 1 << 19; -static const uint32_t kGamma2 = (8380417 - 1) / 32; -// 256^-1 mod kPrime, in Montgomery form. 
-static const uint32_t kInverseDegreeMontgomery = 41978; - -typedef struct scalar { - uint32_t c[DEGREE]; -} scalar; - -typedef struct vectork { - scalar v[K]; -} vectork; - -typedef struct vectorl { - scalar v[L]; -} vectorl; - -typedef struct matrix { - scalar v[K][L]; -} matrix; - -/* Arithmetic */ - -// This bit of Python will be referenced in some of the following comments: -// -// q = 8380417 -// # Inverse of -q modulo 2^32 -// q_neg_inverse = 4236238847 -// # 2^64 modulo q -// montgomery_square = 2365951 -// -// def bitreverse(i): -// ret = 0 -// for n in range(8): -// bit = i & 1 -// ret <<= 1 -// ret |= bit -// i >>= 1 -// return ret -// -// def montgomery_reduce(x): -// a = (x * q_neg_inverse) % 2**32 -// b = x + a * q -// assert b & 0xFFFF_FFFF == 0 -// c = b >> 32 -// assert c < q -// return c -// -// def montgomery_transform(x): -// return montgomery_reduce(x * montgomery_square) - -// kNTTRootsMontgomery = [ -// montgomery_transform(pow(1753, bitreverse(i), q)) for i in range(256) -// ] -static const uint32_t kNTTRootsMontgomery[256] = { - 4193792, 25847, 5771523, 7861508, 237124, 7602457, 7504169, 466468, - 1826347, 2353451, 8021166, 6288512, 3119733, 5495562, 3111497, 2680103, - 2725464, 1024112, 7300517, 3585928, 7830929, 7260833, 2619752, 6271868, - 6262231, 4520680, 6980856, 5102745, 1757237, 8360995, 4010497, 280005, - 2706023, 95776, 3077325, 3530437, 6718724, 4788269, 5842901, 3915439, - 4519302, 5336701, 3574422, 5512770, 3539968, 8079950, 2348700, 7841118, - 6681150, 6736599, 3505694, 4558682, 3507263, 6239768, 6779997, 3699596, - 811944, 531354, 954230, 3881043, 3900724, 5823537, 2071892, 5582638, - 4450022, 6851714, 4702672, 5339162, 6927966, 3475950, 2176455, 6795196, - 7122806, 1939314, 4296819, 7380215, 5190273, 5223087, 4747489, 126922, - 3412210, 7396998, 2147896, 2715295, 5412772, 4686924, 7969390, 5903370, - 7709315, 7151892, 8357436, 7072248, 7998430, 1349076, 1852771, 6949987, - 5037034, 264944, 508951, 3097992, 44288, 7280319, 
904516, 3958618, - 4656075, 8371839, 1653064, 5130689, 2389356, 8169440, 759969, 7063561, - 189548, 4827145, 3159746, 6529015, 5971092, 8202977, 1315589, 1341330, - 1285669, 6795489, 7567685, 6940675, 5361315, 4499357, 4751448, 3839961, - 2091667, 3407706, 2316500, 3817976, 5037939, 2244091, 5933984, 4817955, - 266997, 2434439, 7144689, 3513181, 4860065, 4621053, 7183191, 5187039, - 900702, 1859098, 909542, 819034, 495491, 6767243, 8337157, 7857917, - 7725090, 5257975, 2031748, 3207046, 4823422, 7855319, 7611795, 4784579, - 342297, 286988, 5942594, 4108315, 3437287, 5038140, 1735879, 203044, - 2842341, 2691481, 5790267, 1265009, 4055324, 1247620, 2486353, 1595974, - 4613401, 1250494, 2635921, 4832145, 5386378, 1869119, 1903435, 7329447, - 7047359, 1237275, 5062207, 6950192, 7929317, 1312455, 3306115, 6417775, - 7100756, 1917081, 5834105, 7005614, 1500165, 777191, 2235880, 3406031, - 7838005, 5548557, 6709241, 6533464, 5796124, 4656147, 594136, 4603424, - 6366809, 2432395, 2454455, 8215696, 1957272, 3369112, 185531, 7173032, - 5196991, 162844, 1616392, 3014001, 810149, 1652634, 4686184, 6581310, - 5341501, 3523897, 3866901, 269760, 2213111, 7404533, 1717735, 472078, - 7953734, 1723600, 6577327, 1910376, 6712985, 7276084, 8119771, 4546524, - 5441381, 6144432, 7959518, 6094090, 183443, 7403526, 1612842, 4834730, - 7826001, 3919660, 8332111, 7018208, 3937738, 1400424, 7534263, 1976782}; - -// Reduces x mod kPrime in constant time, where 0 <= x < 2*kPrime. -static uint32_t reduce_once(uint32_t x) { - declassify_assert(x < 2 * kPrime); - // return x < kPrime ? x : x - kPrime; - return constant_time_select_int(constant_time_lt_w(x, kPrime), x, x - kPrime); -} - -// Returns the absolute value in constant time. -static uint32_t abs_signed(uint32_t x) { - // return is_positive(x) ? 
x : -x; - // Note: MSVC doesn't like applying the unary minus operator to unsigned types - // (warning C4146), so we write the negation as a bitwise not plus one - // (assuming two's complement representation). - return constant_time_select_int(constant_time_lt_w(x, 0x80000000), x, ~x + 1); -} - -// Returns the absolute value modulo kPrime. -static uint32_t abs_mod_prime(uint32_t x) { - declassify_assert(x < kPrime); - // return x > kHalfPrime ? kPrime - x : x; - return constant_time_select_int(constant_time_lt_w(kHalfPrime, x), kPrime - x, - x); -} - -// Returns the maximum of two values in constant time. -static uint32_t maximum(uint32_t x, uint32_t y) { - // return x < y ? y : x; - return constant_time_select_int(constant_time_lt_w(x, y), y, x); -} - -static void scalar_add(scalar *out, const scalar *lhs, const scalar *rhs) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = reduce_once(lhs->c[i] + rhs->c[i]); - } -} - -static void scalar_sub(scalar *out, const scalar *lhs, const scalar *rhs) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = reduce_once(kPrime + lhs->c[i] - rhs->c[i]); - } -} - -static uint32_t reduce_montgomery(uint64_t x) { - uint64_t a = (uint32_t)x * kPrimeNegInverse; - uint64_t b = x + a * kPrime; - declassify_assert((b & 0xffffffff) == 0); - uint32_t c = b >> 32; - return reduce_once(c); -} - -// Multiply two scalars in the number theoretically transformed state. -static void scalar_mult(scalar *out, const scalar *lhs, const scalar *rhs) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = reduce_montgomery((uint64_t)lhs->c[i] * (uint64_t)rhs->c[i]); - } -} - -// In place number theoretic transform of a given scalar. -// -// FIPS 204, Algorithm 35 (`NTT`). 
-static void scalar_ntt(scalar *s) { - // Step: 1, 2, 4, 8, ..., 128 - // Offset: 128, 64, 32, 16, ..., 1 - int offset = DEGREE; - for (int step = 1; step < DEGREE; step <<= 1) { - offset >>= 1; - int k = 0; - for (int i = 0; i < step; i++) { - assert(k == 2 * offset * i); - const uint32_t step_root = kNTTRootsMontgomery[step + i]; - for (int j = k; j < k + offset; j++) { - uint32_t even = s->c[j]; - uint32_t odd = - reduce_montgomery((uint64_t)step_root * (uint64_t)s->c[j + offset]); - s->c[j] = reduce_once(odd + even); - s->c[j + offset] = reduce_once(kPrime + even - odd); - } - k += 2 * offset; - } - } -} - -// In place inverse number theoretic transform of a given scalar. -// -// FIPS 204, Algorithm 36 (`NTT^-1`). -static void scalar_inverse_ntt(scalar *s) { - // Step: 128, 64, 32, 16, ..., 1 - // Offset: 1, 2, 4, 8, ..., 128 - int step = DEGREE; - for (int offset = 1; offset < DEGREE; offset <<= 1) { - step >>= 1; - int k = 0; - for (int i = 0; i < step; i++) { - assert(k == 2 * offset * i); - const uint32_t step_root = - kPrime - kNTTRootsMontgomery[step + (step - 1 - i)]; - for (int j = k; j < k + offset; j++) { - uint32_t even = s->c[j]; - uint32_t odd = s->c[j + offset]; - s->c[j] = reduce_once(odd + even); - s->c[j + offset] = reduce_montgomery((uint64_t)step_root * - (uint64_t)(kPrime + even - odd)); - } - k += 2 * offset; - } - } - for (int i = 0; i < DEGREE; i++) { - s->c[i] = reduce_montgomery((uint64_t)s->c[i] * - (uint64_t)kInverseDegreeMontgomery); - } -} - -static void vectork_zero(vectork *out) { OPENSSL_memset(out, 0, sizeof(*out)); } - -static void vectork_add(vectork *out, const vectork *lhs, const vectork *rhs) { - for (int i = 0; i < K; i++) { - scalar_add(&out->v[i], &lhs->v[i], &rhs->v[i]); - } -} - -static void vectork_sub(vectork *out, const vectork *lhs, const vectork *rhs) { - for (int i = 0; i < K; i++) { - scalar_sub(&out->v[i], &lhs->v[i], &rhs->v[i]); - } -} - -static void vectork_mult_scalar(vectork *out, const vectork *lhs, - 
const scalar *rhs) { - for (int i = 0; i < K; i++) { - scalar_mult(&out->v[i], &lhs->v[i], rhs); - } -} - -static void vectork_ntt(vectork *a) { - for (int i = 0; i < K; i++) { - scalar_ntt(&a->v[i]); - } -} - -static void vectork_inverse_ntt(vectork *a) { - for (int i = 0; i < K; i++) { - scalar_inverse_ntt(&a->v[i]); - } -} - -static void vectorl_add(vectorl *out, const vectorl *lhs, const vectorl *rhs) { - for (int i = 0; i < L; i++) { - scalar_add(&out->v[i], &lhs->v[i], &rhs->v[i]); - } -} - -static void vectorl_mult_scalar(vectorl *out, const vectorl *lhs, - const scalar *rhs) { - for (int i = 0; i < L; i++) { - scalar_mult(&out->v[i], &lhs->v[i], rhs); - } -} - -static void vectorl_ntt(vectorl *a) { - for (int i = 0; i < L; i++) { - scalar_ntt(&a->v[i]); - } -} - -static void vectorl_inverse_ntt(vectorl *a) { - for (int i = 0; i < L; i++) { - scalar_inverse_ntt(&a->v[i]); - } -} - -static void matrix_mult(vectork *out, const matrix *m, const vectorl *a) { - vectork_zero(out); - for (int i = 0; i < K; i++) { - for (int j = 0; j < L; j++) { - scalar product; - scalar_mult(&product, &m->v[i][j], &a->v[j]); - scalar_add(&out->v[i], &out->v[i], &product); - } - } -} - -/* Rounding & hints */ - -// FIPS 204, Algorithm 29 (`Power2Round`). -static void power2_round(uint32_t *r1, uint32_t *r0, uint32_t r) { - *r1 = r >> kDroppedBits; - *r0 = r - (*r1 << kDroppedBits); - - uint32_t r0_adjusted = reduce_once(kPrime + *r0 - (1 << kDroppedBits)); - uint32_t r1_adjusted = *r1 + 1; - - // Mask is set iff r0 > 2^(dropped_bits - 1). - crypto_word_t mask = - constant_time_lt_w((uint32_t)(1 << (kDroppedBits - 1)), *r0); - // r0 = mask ? r0_adjusted : r0 - *r0 = constant_time_select_int(mask, r0_adjusted, *r0); - // r1 = mask ? r1_adjusted : r1 - *r1 = constant_time_select_int(mask, r1_adjusted, *r1); -} - -// Scale back previously rounded value. 
-static void scale_power2_round(uint32_t *out, uint32_t r1) { - // Pre-condition: 0 <= r1 <= 2^10 - 1 - *out = r1 << kDroppedBits; - // Post-condition: 0 <= out <= 2^23 - 2^13 = kPrime - 1 - assert(*out < kPrime); -} - -// FIPS 204, Algorithm 31 (`HighBits`). -static uint32_t high_bits(uint32_t x) { - // Reference description (given 0 <= x < q): - // - // ``` - // int32_t r0 = x mod+- (2 * kGamma2); - // if (x - r0 == q - 1) { - // return 0; - // } else { - // return (x - r0) / (2 * kGamma2); - // } - // ``` - // - // Below is the formula taken from the reference implementation. - // - // Here, kGamma2 == 2^18 - 2^8 - // This returns ((ceil(x / 2^7) * (2^10 + 1) + 2^21) / 2^22) mod 2^4 - uint32_t r1 = (x + 127) >> 7; - r1 = (r1 * 1025 + (1 << 21)) >> 22; - r1 &= 15; - return r1; -} - -// FIPS 204, Algorithm 30 (`Decompose`). -static void decompose(uint32_t *r1, int32_t *r0, uint32_t r) { - *r1 = high_bits(r); - - *r0 = r; - *r0 -= *r1 * 2 * (int32_t)kGamma2; - *r0 -= (((int32_t)kHalfPrime - *r0) >> 31) & (int32_t)kPrime; -} - -// FIPS 204, Algorithm 32 (`LowBits`). -static int32_t low_bits(uint32_t x) { - uint32_t r1; - int32_t r0; - decompose(&r1, &r0, x); - return r0; -} - -// FIPS 204, Algorithm 33 (`MakeHint`). -static int32_t make_hint(uint32_t ct0, uint32_t cs2, uint32_t w) { - uint32_t r_plus_z = reduce_once(kPrime + w - cs2); - uint32_t r = reduce_once(r_plus_z + ct0); - return high_bits(r) != high_bits(r_plus_z); -} - -// FIPS 204, Algorithm 34 (`UseHint`). 
-static uint32_t use_hint_vartime(uint32_t h, uint32_t r) { - uint32_t r1; - int32_t r0; - decompose(&r1, &r0, r); - - if (h) { - if (r0 > 0) { - return (r1 + 1) & 15; - } else { - return (r1 - 1) & 15; - } - } else { - return r1; - } -} - -static void scalar_power2_round(scalar *s1, scalar *s0, const scalar *s) { - for (int i = 0; i < DEGREE; i++) { - power2_round(&s1->c[i], &s0->c[i], s->c[i]); - } -} - -static void scalar_scale_power2_round(scalar *out, const scalar *in) { - for (int i = 0; i < DEGREE; i++) { - scale_power2_round(&out->c[i], in->c[i]); - } -} - -static void scalar_high_bits(scalar *out, const scalar *in) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = high_bits(in->c[i]); - } -} - -static void scalar_low_bits(scalar *out, const scalar *in) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = low_bits(in->c[i]); - } -} - -static void scalar_max(uint32_t *max, const scalar *s) { - for (int i = 0; i < DEGREE; i++) { - uint32_t abs = abs_mod_prime(s->c[i]); - *max = maximum(*max, abs); - } -} - -static void scalar_max_signed(uint32_t *max, const scalar *s) { - for (int i = 0; i < DEGREE; i++) { - uint32_t abs = abs_signed(s->c[i]); - *max = maximum(*max, abs); - } -} - -static void scalar_make_hint(scalar *out, const scalar *ct0, const scalar *cs2, - const scalar *w) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = make_hint(ct0->c[i], cs2->c[i], w->c[i]); - } -} - -static void scalar_use_hint_vartime(scalar *out, const scalar *h, - const scalar *r) { - for (int i = 0; i < DEGREE; i++) { - out->c[i] = use_hint_vartime(h->c[i], r->c[i]); - } -} - -static void vectork_power2_round(vectork *t1, vectork *t0, const vectork *t) { - for (int i = 0; i < K; i++) { - scalar_power2_round(&t1->v[i], &t0->v[i], &t->v[i]); - } -} - -static void vectork_scale_power2_round(vectork *out, const vectork *in) { - for (int i = 0; i < K; i++) { - scalar_scale_power2_round(&out->v[i], &in->v[i]); - } -} - -static void vectork_high_bits(vectork *out, const vectork 
*in) { - for (int i = 0; i < K; i++) { - scalar_high_bits(&out->v[i], &in->v[i]); - } -} - -static void vectork_low_bits(vectork *out, const vectork *in) { - for (int i = 0; i < K; i++) { - scalar_low_bits(&out->v[i], &in->v[i]); - } -} - -static uint32_t vectork_max(const vectork *a) { - uint32_t max = 0; - for (int i = 0; i < K; i++) { - scalar_max(&max, &a->v[i]); - } - return max; -} - -static uint32_t vectork_max_signed(const vectork *a) { - uint32_t max = 0; - for (int i = 0; i < K; i++) { - scalar_max_signed(&max, &a->v[i]); - } - return max; -} - -// The input vector contains only zeroes and ones. -static size_t vectork_count_ones(const vectork *a) { - size_t count = 0; - for (int i = 0; i < K; i++) { - for (int j = 0; j < DEGREE; j++) { - count += a->v[i].c[j]; - } - } - return count; -} - -static void vectork_make_hint(vectork *out, const vectork *ct0, - const vectork *cs2, const vectork *w) { - for (int i = 0; i < K; i++) { - scalar_make_hint(&out->v[i], &ct0->v[i], &cs2->v[i], &w->v[i]); - } -} - -static void vectork_use_hint_vartime(vectork *out, const vectork *h, - const vectork *r) { - for (int i = 0; i < K; i++) { - scalar_use_hint_vartime(&out->v[i], &h->v[i], &r->v[i]); - } -} - -static uint32_t vectorl_max(const vectorl *a) { - uint32_t max = 0; - for (int i = 0; i < L; i++) { - scalar_max(&max, &a->v[i]); - } - return max; -} - -/* Bit packing */ - -static const uint8_t kMasks[8] = {0x01, 0x03, 0x07, 0x0f, - 0x1f, 0x3f, 0x7f, 0xff}; - -// FIPS 204, Algorithm 10 (`SimpleBitPack`). 
-static void scalar_encode(uint8_t *out, const scalar *s, int bits) { - assert(bits <= (int)sizeof(*s->c) * 8 && bits != 1); - - uint8_t out_byte = 0; - int out_byte_bits = 0; - - for (int i = 0; i < DEGREE; i++) { - uint32_t element = s->c[i]; - int element_bits_done = 0; - - while (element_bits_done < bits) { - int chunk_bits = bits - element_bits_done; - int out_bits_remaining = 8 - out_byte_bits; - if (chunk_bits >= out_bits_remaining) { - chunk_bits = out_bits_remaining; - out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits; - *out = out_byte; - out++; - out_byte_bits = 0; - out_byte = 0; - } else { - out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits; - out_byte_bits += chunk_bits; - } - - element_bits_done += chunk_bits; - element >>= chunk_bits; - } - } - - if (out_byte_bits > 0) { - *out = out_byte; - } -} - -// FIPS 204, Algorithm 11 (`BitPack`). -static void scalar_encode_signed(uint8_t *out, const scalar *s, int bits, - uint32_t max) { - assert(bits <= (int)sizeof(*s->c) * 8 && bits != 1); - - uint8_t out_byte = 0; - int out_byte_bits = 0; - - for (int i = 0; i < DEGREE; i++) { - uint32_t element = reduce_once(kPrime + max - s->c[i]); - declassify_assert(element <= 2 * max); - int element_bits_done = 0; - - while (element_bits_done < bits) { - int chunk_bits = bits - element_bits_done; - int out_bits_remaining = 8 - out_byte_bits; - if (chunk_bits >= out_bits_remaining) { - chunk_bits = out_bits_remaining; - out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits; - *out = out_byte; - out++; - out_byte_bits = 0; - out_byte = 0; - } else { - out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits; - out_byte_bits += chunk_bits; - } - - element_bits_done += chunk_bits; - element >>= chunk_bits; - } - } - - if (out_byte_bits > 0) { - *out = out_byte; - } -} - -// FIPS 204, Algorithm 12 (`SimpleBitUnpack`). 
-static void scalar_decode(scalar *out, const uint8_t *in, int bits) { - assert(bits <= (int)sizeof(*out->c) * 8 && bits != 1); - - uint8_t in_byte = 0; - int in_byte_bits_left = 0; - - for (int i = 0; i < DEGREE; i++) { - uint32_t element = 0; - int element_bits_done = 0; - - while (element_bits_done < bits) { - if (in_byte_bits_left == 0) { - in_byte = *in; - in++; - in_byte_bits_left = 8; - } - - int chunk_bits = bits - element_bits_done; - if (chunk_bits > in_byte_bits_left) { - chunk_bits = in_byte_bits_left; - } - - element |= (in_byte & kMasks[chunk_bits - 1]) << element_bits_done; - in_byte_bits_left -= chunk_bits; - in_byte >>= chunk_bits; - - element_bits_done += chunk_bits; - } - - out->c[i] = element; - } -} - -// FIPS 204, Algorithm 13 (`BitUnpack`). -static int scalar_decode_signed(scalar *out, const uint8_t *in, int bits, - uint32_t max) { - assert(bits <= (int)sizeof(*out->c) * 8 && bits != 1); - - uint8_t in_byte = 0; - int in_byte_bits_left = 0; - - for (int i = 0; i < DEGREE; i++) { - uint32_t element = 0; - int element_bits_done = 0; - - while (element_bits_done < bits) { - if (in_byte_bits_left == 0) { - in_byte = *in; - in++; - in_byte_bits_left = 8; - } - - int chunk_bits = bits - element_bits_done; - if (chunk_bits > in_byte_bits_left) { - chunk_bits = in_byte_bits_left; - } - - element |= (in_byte & kMasks[chunk_bits - 1]) << element_bits_done; - in_byte_bits_left -= chunk_bits; - in_byte >>= chunk_bits; - - element_bits_done += chunk_bits; - } - - // This may be only out of range in cases of invalid input, in which case it - // is okay to leak the value. This function is also called with secret - // input during signing, in |scalar_sample_mask|. However, in that case - // (and in any case when |max| is a power of two), this case is impossible. 
- if (constant_time_declassify_int(element > 2 * max)) { - return 0; - } - out->c[i] = reduce_once(kPrime + max - element); - } - - return 1; -} - -/* Expansion functions */ - -// FIPS 204, Algorithm 24 (`RejNTTPoly`). -// -// Rejection samples a Keccak stream to get uniformly distributed elements. This -// is used for matrix expansion and only operates on public inputs. -static void scalar_from_keccak_vartime( - scalar *out, const uint8_t derived_seed[RHO_BYTES + 2]) { - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake128); - BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, RHO_BYTES + 2); - assert(keccak_ctx.squeeze_offset == 0); - assert(keccak_ctx.rate_bytes == 168); - static_assert(168 % 3 == 0, "block and coefficient boundaries do not align"); - - int done = 0; - while (done < DEGREE) { - uint8_t block[168]; - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - for (size_t i = 0; i < sizeof(block) && done < DEGREE; i += 3) { - // FIPS 204, Algorithm 8 (`CoeffFromThreeBytes`). - uint32_t value = (uint32_t)block[i] | ((uint32_t)block[i + 1] << 8) | - (((uint32_t)block[i + 2] & 0x7f) << 16); - if (value < kPrime) { - out->c[done++] = value; - } - } - } -} - -// FIPS 204, Algorithm 25 (`RejBoundedPoly`). 
-static void scalar_uniform_eta_4(scalar *out, - const uint8_t derived_seed[SIGMA_BYTES + 2]) { - static_assert(ETA == 4, "This implementation is specialized for ETA == 4"); - - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, SIGMA_BYTES + 2); - assert(keccak_ctx.squeeze_offset == 0); - assert(keccak_ctx.rate_bytes == 136); - - int done = 0; - while (done < DEGREE) { - uint8_t block[136]; - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - for (size_t i = 0; i < sizeof(block) && done < DEGREE; ++i) { - uint32_t t0 = block[i] & 0x0F; - uint32_t t1 = block[i] >> 4; - // FIPS 204, Algorithm 9 (`CoefFromHalfByte`). Although both the input and - // output here are secret, it is OK to leak when we rejected a byte. - // Individual bytes of the SHAKE-256 stream are (indistiguishable from) - // independent of each other and the original seed, so leaking information - // about the rejected bytes does not reveal the input or output. - if (constant_time_declassify_int(t0 < 9)) { - out->c[done++] = reduce_once(kPrime + ETA - t0); - } - if (done < DEGREE && constant_time_declassify_int(t1 < 9)) { - out->c[done++] = reduce_once(kPrime + ETA - t1); - } - } - } -} - -// FIPS 204, Algorithm 28 (`ExpandMask`). -static void scalar_sample_mask( - scalar *out, const uint8_t derived_seed[RHO_PRIME_BYTES + 2]) { - uint8_t buf[640]; - BORINGSSL_keccak(buf, sizeof(buf), derived_seed, RHO_PRIME_BYTES + 2, - boringssl_shake256); - - // Note: Decoding 20 bits into (-2^19, 2^19] cannot fail. - scalar_decode_signed(out, buf, 20, 1 << 19); -} - -// FIPS 204, Algorithm 23 (`SampleInBall`). 
-static void scalar_sample_in_ball_vartime(scalar *out, const uint8_t *seed, - int len) { - assert(len == 32); - - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, seed, len); - assert(keccak_ctx.squeeze_offset == 0); - assert(keccak_ctx.rate_bytes == 136); - - uint8_t block[136]; - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - - uint64_t signs = CRYPTO_load_u64_le(block); - int offset = 8; - // SampleInBall implements a Fisher–Yates shuffle, which unavoidably leaks - // where the zeros are by memory access pattern. Although this leak happens - // before bad signatures are rejected, this is safe. See - // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/8d8f01ac_70af3f21/ - CONSTTIME_DECLASSIFY(block + offset, sizeof(block) - offset); - - OPENSSL_memset(out, 0, sizeof(*out)); - for (size_t i = DEGREE - TAU; i < DEGREE; i++) { - size_t byte; - for (;;) { - if (offset == 136) { - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - // See above. - CONSTTIME_DECLASSIFY(block, sizeof(block)); - offset = 0; - } - - byte = block[offset++]; - if (byte <= i) { - break; - } - } - - out->c[i] = out->c[byte]; - out->c[byte] = reduce_once(kPrime + 1 - 2 * (signs & 1)); - signs >>= 1; - } -} - -// FIPS 204, Algorithm 26 (`ExpandA`). -static void matrix_expand(matrix *out, const uint8_t rho[RHO_BYTES]) { - static_assert(K <= 0x100, "K must fit in 8 bits"); - static_assert(L <= 0x100, "L must fit in 8 bits"); - - uint8_t derived_seed[RHO_BYTES + 2]; - OPENSSL_memcpy(derived_seed, rho, RHO_BYTES); - for (int i = 0; i < K; i++) { - for (int j = 0; j < L; j++) { - derived_seed[RHO_BYTES + 1] = i; - derived_seed[RHO_BYTES] = j; - scalar_from_keccak_vartime(&out->v[i][j], derived_seed); - } - } -} - -// FIPS 204, Algorithm 27 (`ExpandS`). 
-static void vector_expand_short(vectorl *s1, vectork *s2, - const uint8_t sigma[SIGMA_BYTES]) { - static_assert(K <= 0x100, "K must fit in 8 bits"); - static_assert(L <= 0x100, "L must fit in 8 bits"); - static_assert(K + L <= 0x100, "K+L must fit in 8 bits"); - - uint8_t derived_seed[SIGMA_BYTES + 2]; - OPENSSL_memcpy(derived_seed, sigma, SIGMA_BYTES); - derived_seed[SIGMA_BYTES] = 0; - derived_seed[SIGMA_BYTES + 1] = 0; - for (int i = 0; i < L; i++) { - scalar_uniform_eta_4(&s1->v[i], derived_seed); - ++derived_seed[SIGMA_BYTES]; - } - for (int i = 0; i < K; i++) { - scalar_uniform_eta_4(&s2->v[i], derived_seed); - ++derived_seed[SIGMA_BYTES]; - } -} - -// FIPS 204, Algorithm 28 (`ExpandMask`). -static void vectorl_expand_mask(vectorl *out, - const uint8_t seed[RHO_PRIME_BYTES], - size_t kappa) { - assert(kappa + L <= 0x10000); - - uint8_t derived_seed[RHO_PRIME_BYTES + 2]; - OPENSSL_memcpy(derived_seed, seed, RHO_PRIME_BYTES); - for (int i = 0; i < L; i++) { - size_t index = kappa + i; - derived_seed[RHO_PRIME_BYTES] = index & 0xFF; - derived_seed[RHO_PRIME_BYTES + 1] = (index >> 8) & 0xFF; - scalar_sample_mask(&out->v[i], derived_seed); - } -} - -/* Encoding */ - -// FIPS 204, Algorithm 10 (`SimpleBitPack`). -// -// Encodes an entire vector into 32*K*|bits| bytes. Note that since 256 (DEGREE) -// is divisible by 8, the individual vector entries will always fill a whole -// number of bytes, so we do not need to worry about bit packing here. -static void vectork_encode(uint8_t *out, const vectork *a, int bits) { - for (int i = 0; i < K; i++) { - scalar_encode(out + i * bits * DEGREE / 8, &a->v[i], bits); - } -} - -// FIPS 204, Algorithm 12 (`SimpleBitUnpack`). 
-static void vectork_decode(vectork *out, const uint8_t *in, int bits) { - for (int i = 0; i < K; i++) { - scalar_decode(&out->v[i], in + i * bits * DEGREE / 8, bits); - } -} - -static void vectork_encode_signed(uint8_t *out, const vectork *a, int bits, - uint32_t max) { - for (int i = 0; i < K; i++) { - scalar_encode_signed(out + i * bits * DEGREE / 8, &a->v[i], bits, max); - } -} - -static int vectork_decode_signed(vectork *out, const uint8_t *in, int bits, - uint32_t max) { - for (int i = 0; i < K; i++) { - if (!scalar_decode_signed(&out->v[i], in + i * bits * DEGREE / 8, bits, - max)) { - return 0; - } - } - return 1; -} - -// FIPS 204, Algorithm 11 (`BitPack`). -// -// Encodes an entire vector into 32*L*|bits| bytes. Note that since 256 (DEGREE) -// is divisible by 8, the individual vector entries will always fill a whole -// number of bytes, so we do not need to worry about bit packing here. -static void vectorl_encode_signed(uint8_t *out, const vectorl *a, int bits, - uint32_t max) { - for (int i = 0; i < L; i++) { - scalar_encode_signed(out + i * bits * DEGREE / 8, &a->v[i], bits, max); - } -} - -static int vectorl_decode_signed(vectorl *out, const uint8_t *in, int bits, - uint32_t max) { - for (int i = 0; i < L; i++) { - if (!scalar_decode_signed(&out->v[i], in + i * bits * DEGREE / 8, bits, - max)) { - return 0; - } - } - return 1; -} - -// FIPS 204, Algorithm 22 (`w1Encode`). -// -// The output must point to an array of 128*K bytes. -static void w1_encode(uint8_t *out, const vectork *w1) { - vectork_encode(out, w1, 4); -} - -// FIPS 204, Algorithm 14 (`HintBitPack`). -static void hint_bit_pack(uint8_t *out, const vectork *h) { - OPENSSL_memset(out, 0, OMEGA + K); - int index = 0; - for (int i = 0; i < K; i++) { - for (int j = 0; j < DEGREE; j++) { - if (h->v[i].c[j]) { - out[index++] = j; - } - } - out[OMEGA + i] = index; - } -} - -// FIPS 204, Algorithm 15 (`HintBitUnpack`). 
-static int hint_bit_unpack(vectork *h, const uint8_t *in) { - vectork_zero(h); - int index = 0; - for (int i = 0; i < K; i++) { - int limit = in[OMEGA + i]; - if (limit < index || limit > OMEGA) { - return 0; - } - - int last = -1; - while (index < limit) { - int byte = in[index++]; - if (last >= 0 && byte <= last) { - return 0; - } - last = byte; - h->v[i].c[byte] = 1; - } - } - for (; index < OMEGA; index++) { - if (in[index] != 0) { - return 0; - } - } - return 1; -} - -struct public_key { - uint8_t rho[RHO_BYTES]; - vectork t1; - // Pre-cached value(s). - uint8_t public_key_hash[TR_BYTES]; -}; - -struct private_key { - uint8_t rho[RHO_BYTES]; - uint8_t k[K_BYTES]; - uint8_t public_key_hash[TR_BYTES]; - vectorl s1; - vectork s2; - vectork t0; -}; - -struct signature { - uint8_t c_tilde[2 * LAMBDA_BYTES]; - vectorl z; - vectork h; -}; - -// FIPS 204, Algorithm 16 (`pkEncode`). -static int dilithium_marshal_public_key(CBB *out, - const struct public_key *pub) { - if (!CBB_add_bytes(out, pub->rho, sizeof(pub->rho))) { - return 0; - } - - uint8_t *vectork_output; - if (!CBB_add_space(out, &vectork_output, 320 * K)) { - return 0; - } - vectork_encode(vectork_output, &pub->t1, 10); - - return 1; -} - -// FIPS 204, Algorithm 17 (`pkDecode`). -static int dilithium_parse_public_key(struct public_key *pub, CBS *in) { - if (!CBS_copy_bytes(in, pub->rho, sizeof(pub->rho))) { - return 0; - } - - CBS t1_bytes; - if (!CBS_get_bytes(in, &t1_bytes, 320 * K)) { - return 0; - } - vectork_decode(&pub->t1, CBS_data(&t1_bytes), 10); - - return 1; -} - -// FIPS 204, Algorithm 18 (`skEncode`). 
-static int dilithium_marshal_private_key(CBB *out, - const struct private_key *priv) { - if (!CBB_add_bytes(out, priv->rho, sizeof(priv->rho)) || - !CBB_add_bytes(out, priv->k, sizeof(priv->k)) || - !CBB_add_bytes(out, priv->public_key_hash, - sizeof(priv->public_key_hash))) { - return 0; - } - - uint8_t *vectorl_output; - if (!CBB_add_space(out, &vectorl_output, 128 * L)) { - return 0; - } - vectorl_encode_signed(vectorl_output, &priv->s1, 4, ETA); - - uint8_t *vectork_output; - if (!CBB_add_space(out, &vectork_output, 128 * K)) { - return 0; - } - vectork_encode_signed(vectork_output, &priv->s2, 4, ETA); - - if (!CBB_add_space(out, &vectork_output, 416 * K)) { - return 0; - } - vectork_encode_signed(vectork_output, &priv->t0, 13, 1 << 12); - - return 1; -} - -// FIPS 204, Algorithm 19 (`skDecode`). -static int dilithium_parse_private_key(struct private_key *priv, CBS *in) { - CBS s1_bytes; - CBS s2_bytes; - CBS t0_bytes; - if (!CBS_copy_bytes(in, priv->rho, sizeof(priv->rho)) || - !CBS_copy_bytes(in, priv->k, sizeof(priv->k)) || - !CBS_copy_bytes(in, priv->public_key_hash, - sizeof(priv->public_key_hash)) || - !CBS_get_bytes(in, &s1_bytes, 128 * L) || - !vectorl_decode_signed(&priv->s1, CBS_data(&s1_bytes), 4, ETA) || - !CBS_get_bytes(in, &s2_bytes, 128 * K) || - !vectork_decode_signed(&priv->s2, CBS_data(&s2_bytes), 4, ETA) || - !CBS_get_bytes(in, &t0_bytes, 416 * K) || - // Note: Decoding 13 bits into (-2^12, 2^12] cannot fail. - !vectork_decode_signed(&priv->t0, CBS_data(&t0_bytes), 13, 1 << 12)) { - return 0; - } - - return 1; -} - -// FIPS 204, Algorithm 20 (`sigEncode`). 
-static int dilithium_marshal_signature(CBB *out, const struct signature *sign) { - if (!CBB_add_bytes(out, sign->c_tilde, sizeof(sign->c_tilde))) { - return 0; - } - - uint8_t *vectorl_output; - if (!CBB_add_space(out, &vectorl_output, 640 * L)) { - return 0; - } - vectorl_encode_signed(vectorl_output, &sign->z, 20, 1 << 19); - - uint8_t *hint_output; - if (!CBB_add_space(out, &hint_output, OMEGA + K)) { - return 0; - } - hint_bit_pack(hint_output, &sign->h); - - return 1; -} - -// FIPS 204, Algorithm 21 (`sigDecode`). -static int dilithium_parse_signature(struct signature *sign, CBS *in) { - CBS z_bytes; - CBS hint_bytes; - if (!CBS_copy_bytes(in, sign->c_tilde, sizeof(sign->c_tilde)) || - !CBS_get_bytes(in, &z_bytes, 640 * L) || - // Note: Decoding 20 bits into (-2^19, 2^19] cannot fail. - !vectorl_decode_signed(&sign->z, CBS_data(&z_bytes), 20, 1 << 19) || - !CBS_get_bytes(in, &hint_bytes, OMEGA + K) || - !hint_bit_unpack(&sign->h, CBS_data(&hint_bytes))) { - return 0; - }; - - return 1; -} - -static struct private_key *private_key_from_external( - const struct DILITHIUM_private_key *external) { - static_assert( - sizeof(struct DILITHIUM_private_key) == sizeof(struct private_key), - "Kyber private key size incorrect"); - static_assert( - alignof(struct DILITHIUM_private_key) == alignof(struct private_key), - "Kyber private key align incorrect"); - return (struct private_key *)external; -} - -static struct public_key *public_key_from_external( - const struct DILITHIUM_public_key *external) { - static_assert( - sizeof(struct DILITHIUM_public_key) == sizeof(struct public_key), - "Dilithium public key size incorrect"); - static_assert( - alignof(struct DILITHIUM_public_key) == alignof(struct public_key), - "Dilithium public key align incorrect"); - return (struct public_key *)external; -} - -/* API */ - -// Calls |DILITHIUM_generate_key_external_entropy| with random bytes from -// |RAND_bytes|. Returns 1 on success and 0 on failure. 
-int DILITHIUM_generate_key( - uint8_t out_encoded_public_key[DILITHIUM_PUBLIC_KEY_BYTES], - struct DILITHIUM_private_key *out_private_key) { - uint8_t entropy[DILITHIUM_GENERATE_KEY_ENTROPY]; - RAND_bytes(entropy, sizeof(entropy)); - return DILITHIUM_generate_key_external_entropy(out_encoded_public_key, - out_private_key, entropy); -} - -// FIPS 204, Algorithm 1 (`ML-DSA.KeyGen`). Returns 1 on success and 0 on -// failure. -int DILITHIUM_generate_key_external_entropy( - uint8_t out_encoded_public_key[DILITHIUM_PUBLIC_KEY_BYTES], - struct DILITHIUM_private_key *out_private_key, - const uint8_t entropy[DILITHIUM_GENERATE_KEY_ENTROPY]) { - int ret = 0; - - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. - struct values_st { - struct public_key pub; - matrix a_ntt; - vectorl s1_ntt; - vectork t; - }; - struct values_st *values = - reinterpret_cast(OPENSSL_malloc(sizeof(*values))); - if (values == NULL) { - return 0; - } - - struct private_key *priv = private_key_from_external(out_private_key); - - uint8_t expanded_seed[RHO_BYTES + SIGMA_BYTES + K_BYTES]; - BORINGSSL_keccak(expanded_seed, sizeof(expanded_seed), entropy, - DILITHIUM_GENERATE_KEY_ENTROPY, boringssl_shake256); - const uint8_t *const rho = expanded_seed; - const uint8_t *const sigma = expanded_seed + RHO_BYTES; - const uint8_t *const k = expanded_seed + RHO_BYTES + SIGMA_BYTES; - // rho is public. 
- CONSTTIME_DECLASSIFY(rho, RHO_BYTES); - OPENSSL_memcpy(values->pub.rho, rho, sizeof(values->pub.rho)); - OPENSSL_memcpy(priv->rho, rho, sizeof(priv->rho)); - OPENSSL_memcpy(priv->k, k, sizeof(priv->k)); - - matrix_expand(&values->a_ntt, rho); - vector_expand_short(&priv->s1, &priv->s2, sigma); - - OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); - vectorl_ntt(&values->s1_ntt); - - matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt); - vectork_inverse_ntt(&values->t); - vectork_add(&values->t, &values->t, &priv->s2); - - vectork_power2_round(&values->pub.t1, &priv->t0, &values->t); - // t1 is public. - CONSTTIME_DECLASSIFY(&values->pub.t1, sizeof(values->pub.t1)); - - CBB cbb; - CBB_init_fixed(&cbb, out_encoded_public_key, DILITHIUM_PUBLIC_KEY_BYTES); - if (!dilithium_marshal_public_key(&cbb, &values->pub)) { - goto err; - } - - BORINGSSL_keccak(priv->public_key_hash, sizeof(priv->public_key_hash), - out_encoded_public_key, DILITHIUM_PUBLIC_KEY_BYTES, - boringssl_shake256); - - ret = 1; -err: - OPENSSL_free(values); - return ret; -} - -int DILITHIUM_public_from_private( - struct DILITHIUM_public_key *out_public_key, - const struct DILITHIUM_private_key *private_key) { - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. 
- struct values_st { - matrix a_ntt; - vectorl s1_ntt; - vectork t; - vectork t0; - }; - struct values_st *values = - reinterpret_cast(OPENSSL_malloc(sizeof(*values))); - if (values == NULL) { - return 0; - } - - const struct private_key *priv = private_key_from_external(private_key); - struct public_key *pub = public_key_from_external(out_public_key); - - OPENSSL_memcpy(pub->rho, priv->rho, sizeof(pub->rho)); - OPENSSL_memcpy(pub->public_key_hash, priv->public_key_hash, - sizeof(pub->public_key_hash)); - - matrix_expand(&values->a_ntt, priv->rho); - - OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); - vectorl_ntt(&values->s1_ntt); - - matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt); - vectork_inverse_ntt(&values->t); - vectork_add(&values->t, &values->t, &priv->s2); - - vectork_power2_round(&pub->t1, &values->t0, &values->t); - - OPENSSL_free(values); - return 1; -} - -// FIPS 204, Algorithm 2 (`ML-DSA.Sign`). Returns 1 on success and 0 on failure. -static int dilithium_sign_with_randomizer( - uint8_t out_encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const struct DILITHIUM_private_key *private_key, const uint8_t *msg, - size_t msg_len, - const uint8_t randomizer[DILITHIUM_SIGNATURE_RANDOMIZER_BYTES]) { - int ret = 0; - - const struct private_key *priv = private_key_from_external(private_key); - - uint8_t mu[MU_BYTES]; - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, priv->public_key_hash, - sizeof(priv->public_key_hash)); - BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len); - BORINGSSL_keccak_squeeze(&keccak_ctx, mu, MU_BYTES); - - uint8_t rho_prime[RHO_PRIME_BYTES]; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, priv->k, sizeof(priv->k)); - BORINGSSL_keccak_absorb(&keccak_ctx, randomizer, - DILITHIUM_SIGNATURE_RANDOMIZER_BYTES); - BORINGSSL_keccak_absorb(&keccak_ctx, mu, MU_BYTES); - 
BORINGSSL_keccak_squeeze(&keccak_ctx, rho_prime, RHO_PRIME_BYTES); - - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. - struct values_st { - struct signature sign; - vectorl s1_ntt; - vectork s2_ntt; - vectork t0_ntt; - matrix a_ntt; - vectorl y; - vectorl y_ntt; - vectork w; - vectork w1; - vectorl cs1; - vectork cs2; - vectork r0; - vectork ct0; - }; - struct values_st *values = - reinterpret_cast(OPENSSL_malloc(sizeof(*values))); - if (values == NULL) { - goto err; - } - OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); - vectorl_ntt(&values->s1_ntt); - - OPENSSL_memcpy(&values->s2_ntt, &priv->s2, sizeof(values->s2_ntt)); - vectork_ntt(&values->s2_ntt); - - OPENSSL_memcpy(&values->t0_ntt, &priv->t0, sizeof(values->t0_ntt)); - vectork_ntt(&values->t0_ntt); - - matrix_expand(&values->a_ntt, priv->rho); - - for (size_t kappa = 0;; kappa += L) { - // TODO(bbe): y only lives long enough to compute y_ntt. - // consider using another vectorl to save memory. - vectorl_expand_mask(&values->y, rho_prime, kappa); - - OPENSSL_memcpy(&values->y_ntt, &values->y, sizeof(values->y_ntt)); - vectorl_ntt(&values->y_ntt); - - // TODO(bbe): w only lives long enough to compute y_ntt. - // consider using another vectork to save memory. 
- matrix_mult(&values->w, &values->a_ntt, &values->y_ntt); - vectork_inverse_ntt(&values->w); - - vectork_high_bits(&values->w1, &values->w); - uint8_t w1_encoded[128 * K]; - w1_encode(w1_encoded, &values->w1); - - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, mu, MU_BYTES); - BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K); - BORINGSSL_keccak_squeeze(&keccak_ctx, values->sign.c_tilde, - 2 * LAMBDA_BYTES); - - scalar c_ntt; - scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde, 32); - scalar_ntt(&c_ntt); - - vectorl_mult_scalar(&values->cs1, &values->s1_ntt, &c_ntt); - vectorl_inverse_ntt(&values->cs1); - vectork_mult_scalar(&values->cs2, &values->s2_ntt, &c_ntt); - vectork_inverse_ntt(&values->cs2); - - vectorl_add(&values->sign.z, &values->y, &values->cs1); - - vectork_sub(&values->r0, &values->w, &values->cs2); - vectork_low_bits(&values->r0, &values->r0); - - // Leaking the fact that a signature was rejected is fine as the next - // attempt at a signature will be (indistinguishable from) independent of - // this one. Note, however, that we additionally leak which of the two - // branches rejected the signature. Section 5.5 of - // https://pq-crystals.org/dilithium/data/dilithium-specification-round3.pdf - // describes this leak as OK. Note we leak less than what is described by - // the paper; we do not reveal which coefficient violated the bound, and we - // hide which of the |z_max| or |r0_max| bound failed. 
See also - // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/2bbab0fa_d241d35a/ - uint32_t z_max = vectorl_max(&values->sign.z); - uint32_t r0_max = vectork_max_signed(&values->r0); - if (constant_time_declassify_w( - constant_time_ge_w(z_max, kGamma1 - BETA) | - constant_time_ge_w(r0_max, kGamma2 - BETA))) { - continue; - } - - vectork_mult_scalar(&values->ct0, &values->t0_ntt, &c_ntt); - vectork_inverse_ntt(&values->ct0); - vectork_make_hint(&values->sign.h, &values->ct0, &values->cs2, &values->w); - - // See above. - uint32_t ct0_max = vectork_max(&values->ct0); - size_t h_ones = vectork_count_ones(&values->sign.h); - if (constant_time_declassify_w(constant_time_ge_w(ct0_max, kGamma2) | - constant_time_lt_w(OMEGA, h_ones))) { - continue; - } - - // Although computed with the private key, the signature is public. - CONSTTIME_DECLASSIFY(values->sign.c_tilde, sizeof(values->sign.c_tilde)); - CONSTTIME_DECLASSIFY(&values->sign.z, sizeof(values->sign.z)); - CONSTTIME_DECLASSIFY(&values->sign.h, sizeof(values->sign.h)); - - CBB cbb; - CBB_init_fixed(&cbb, out_encoded_signature, DILITHIUM_SIGNATURE_BYTES); - if (!dilithium_marshal_signature(&cbb, &values->sign)) { - goto err; - } - - BSSL_CHECK(CBB_len(&cbb) == DILITHIUM_SIGNATURE_BYTES); - ret = 1; - break; - } - -err: - OPENSSL_free(values); - return ret; -} - -// Dilithium signature in deterministic mode. Returns 1 on success and 0 on -// failure. -int DILITHIUM_sign_deterministic( - uint8_t out_encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const struct DILITHIUM_private_key *private_key, const uint8_t *msg, - size_t msg_len) { - uint8_t randomizer[DILITHIUM_SIGNATURE_RANDOMIZER_BYTES]; - OPENSSL_memset(randomizer, 0, sizeof(randomizer)); - return dilithium_sign_with_randomizer(out_encoded_signature, private_key, msg, - msg_len, randomizer); -} - -// Dilithium signature in randomized mode, filling the random bytes with -// |RAND_bytes|. Returns 1 on success and 0 on failure. 
-int DILITHIUM_sign(uint8_t out_encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const struct DILITHIUM_private_key *private_key, - const uint8_t *msg, size_t msg_len) { - uint8_t randomizer[DILITHIUM_SIGNATURE_RANDOMIZER_BYTES]; - RAND_bytes(randomizer, sizeof(randomizer)); - return dilithium_sign_with_randomizer(out_encoded_signature, private_key, msg, - msg_len, randomizer); -} - -// FIPS 204, Algorithm 3 (`ML-DSA.Verify`). -int DILITHIUM_verify(const struct DILITHIUM_public_key *public_key, - const uint8_t encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const uint8_t *msg, size_t msg_len) { - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. - struct values_st { - struct signature sign; - matrix a_ntt; - vectorl z_ntt; - vectork az_ntt; - vectork t1_ntt; - vectork ct1_ntt; - vectork w_approx; - vectork w1; - }; - struct values_st *values = - reinterpret_cast(OPENSSL_malloc(sizeof(*values))); - if (values == NULL) { - return 0; - } - - const struct public_key *pub = public_key_from_external(public_key); - - CBS cbs; - CBS_init(&cbs, encoded_signature, DILITHIUM_SIGNATURE_BYTES); - if (!dilithium_parse_signature(&values->sign, &cbs)) { - OPENSSL_free(values); - return 0; - } - - matrix_expand(&values->a_ntt, pub->rho); - - uint8_t mu[MU_BYTES]; - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, pub->public_key_hash, - sizeof(pub->public_key_hash)); - BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len); - BORINGSSL_keccak_squeeze(&keccak_ctx, mu, MU_BYTES); - - scalar c_ntt; - scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde, 32); - scalar_ntt(&c_ntt); - - OPENSSL_memcpy(&values->z_ntt, &values->sign.z, sizeof(values->z_ntt)); - vectorl_ntt(&values->z_ntt); - - matrix_mult(&values->az_ntt, &values->a_ntt, &values->z_ntt); - - vectork_scale_power2_round(&values->t1_ntt, &pub->t1); - vectork_ntt(&values->t1_ntt); - - 
vectork_mult_scalar(&values->ct1_ntt, &values->t1_ntt, &c_ntt); - - vectork_sub(&values->w_approx, &values->az_ntt, &values->ct1_ntt); - vectork_inverse_ntt(&values->w_approx); - - vectork_use_hint_vartime(&values->w1, &values->sign.h, &values->w_approx); - uint8_t w1_encoded[128 * K]; - w1_encode(w1_encoded, &values->w1); - - uint8_t c_tilde[2 * LAMBDA_BYTES]; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, mu, MU_BYTES); - BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K); - BORINGSSL_keccak_squeeze(&keccak_ctx, c_tilde, 2 * LAMBDA_BYTES); - - uint32_t z_max = vectorl_max(&values->sign.z); - size_t h_ones = vectork_count_ones(&values->sign.h); - int ret = 0; - if (z_max < kGamma1 - BETA && h_ones <= OMEGA && - OPENSSL_memcmp(c_tilde, values->sign.c_tilde, 2 * LAMBDA_BYTES) == 0) { - ret = 1; - } - - OPENSSL_free(values); - return ret; -} - -/* Serialization of keys. */ - -int DILITHIUM_marshal_public_key( - CBB *out, const struct DILITHIUM_public_key *public_key) { - return dilithium_marshal_public_key(out, - public_key_from_external(public_key)); -} - -int DILITHIUM_parse_public_key(struct DILITHIUM_public_key *public_key, - CBS *in) { - struct public_key *pub = public_key_from_external(public_key); - CBS orig_in = *in; - if (!dilithium_parse_public_key(pub, in) || CBS_len(in) != 0) { - return 0; - } - - // Compute pre-cached values. 
- BORINGSSL_keccak(pub->public_key_hash, sizeof(pub->public_key_hash), - CBS_data(&orig_in), CBS_len(&orig_in), boringssl_shake256); - return 1; -} - -int DILITHIUM_marshal_private_key( - CBB *out, const struct DILITHIUM_private_key *private_key) { - return dilithium_marshal_private_key(out, - private_key_from_external(private_key)); -} - -int DILITHIUM_parse_private_key(struct DILITHIUM_private_key *private_key, - CBS *in) { - struct private_key *priv = private_key_from_external(private_key); - return dilithium_parse_private_key(priv, in) && CBS_len(in) == 0; -} diff --git a/Sources/CCryptoBoringSSL/crypto/dilithium/internal.h b/Sources/CCryptoBoringSSL/crypto/dilithium/internal.h deleted file mode 100644 index a8a7b3d1..00000000 --- a/Sources/CCryptoBoringSSL/crypto/dilithium/internal.h +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CRYPTO_DILITHIUM_INTERNAL_H -#define OPENSSL_HEADER_CRYPTO_DILITHIUM_INTERNAL_H - -#include -#include - -#if defined(__cplusplus) -extern "C" { -#endif - - -// DILITHIUM_GENERATE_KEY_ENTROPY is the number of bytes of uniformly random -// entropy necessary to generate a key pair. 
-#define DILITHIUM_GENERATE_KEY_ENTROPY 32 - -// DILITHIUM_SIGNATURE_RANDOMIZER_BYTES is the number of bytes of uniformly -// random entropy necessary to generate a signature in randomized mode. -#define DILITHIUM_SIGNATURE_RANDOMIZER_BYTES 32 - -// DILITHIUM_generate_key_external_entropy generates a public/private key pair -// using the given seed, writes the encoded public key to -// |out_encoded_public_key| and sets |out_private_key| to the private key, -// returning 1 on success and 0 on failure. Returns 1 on success and 0 on -// failure. -OPENSSL_EXPORT int DILITHIUM_generate_key_external_entropy( - uint8_t out_encoded_public_key[DILITHIUM_PUBLIC_KEY_BYTES], - struct DILITHIUM_private_key *out_private_key, - const uint8_t entropy[DILITHIUM_GENERATE_KEY_ENTROPY]); - -// DILITHIUM_sign_deterministic generates a signature for the message |msg| of -// length |msg_len| using |private_key| following the deterministic algorithm, -// and writes the encoded signature to |out_encoded_signature|. Returns 1 on -// success and 0 on failure. -OPENSSL_EXPORT int DILITHIUM_sign_deterministic( - uint8_t out_encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const struct DILITHIUM_private_key *private_key, const uint8_t *msg, - size_t msg_len); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_DILITHIUM_INTERNAL_H diff --git a/Sources/CCryptoBoringSSL/crypto/dsa/dsa_asn1.cc b/Sources/CCryptoBoringSSL/crypto/dsa/dsa_asn1.cc index 010c3465..426e0a8e 100644 --- a/Sources/CCryptoBoringSSL/crypto/dsa/dsa_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/dsa/dsa_asn1.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2000. */ +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2000. */ /* ==================================================================== * Copyright (c) 2000-2005 The OpenSSL Project. All rights reserved. 
* diff --git a/Sources/CCryptoBoringSSL/crypto/dsa/internal.h b/Sources/CCryptoBoringSSL/crypto/dsa/internal.h index eb537d6f..4002cbc7 100644 --- a/Sources/CCryptoBoringSSL/crypto/dsa/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/dsa/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/ec_extra/ec_derive.cc b/Sources/CCryptoBoringSSL/crypto/ec_extra/ec_derive.cc index fdbe521b..1dd7336d 100644 --- a/Sources/CCryptoBoringSSL/crypto/ec_extra/ec_derive.cc +++ b/Sources/CCryptoBoringSSL/crypto/ec_extra/ec_derive.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/ec_extra/hash_to_curve.cc b/Sources/CCryptoBoringSSL/crypto/ec_extra/hash_to_curve.cc index 2e9b132c..8e20a98f 100644 --- a/Sources/CCryptoBoringSSL/crypto/ec_extra/hash_to_curve.cc +++ b/Sources/CCryptoBoringSSL/crypto/ec_extra/hash_to_curve.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/ec_extra/internal.h b/Sources/CCryptoBoringSSL/crypto/ec_extra/internal.h index 7df60e51..111b7194 100644 --- a/Sources/CCryptoBoringSSL/crypto/ec_extra/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/ec_extra/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. 
+/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/engine/engine.cc b/Sources/CCryptoBoringSSL/crypto/engine/engine.cc index 8fd20174..622415ef 100644 --- a/Sources/CCryptoBoringSSL/crypto/engine/engine.cc +++ b/Sources/CCryptoBoringSSL/crypto/engine/engine.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/err/err.cc b/Sources/CCryptoBoringSSL/crypto/err/err.cc index 54d04a58..bacf0cdb 100644 --- a/Sources/CCryptoBoringSSL/crypto/err/err.cc +++ b/Sources/CCryptoBoringSSL/crypto/err/err.cc @@ -740,7 +740,6 @@ static void err_add_error_vdata(unsigned num, va_list args) { assert(0); // should not be possible. } } - va_end(args); err_set_error_data(buf); } diff --git a/Sources/CCryptoBoringSSL/crypto/err/internal.h b/Sources/CCryptoBoringSSL/crypto/err/internal.h index c205bc91..2b6befc6 100644 --- a/Sources/CCryptoBoringSSL/crypto/err/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/err/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_ec.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_ec.cc index 940a2643..c7fe0db4 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_ec.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_ec.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2006. 
+/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_ec_asn1.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_ec_asn1.cc index a298d440..e6ca0924 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_ec_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_ec_asn1.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2006. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519.cc index 35214fa1..5ff556d1 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519_asn1.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519_asn1.cc index d7264807..4384e511 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_ed25519_asn1.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. 
+/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_hkdf.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_hkdf.cc index 7b3381e3..7eef9d9c 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_hkdf.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_hkdf.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. +/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_rsa.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_rsa.cc index 6c2c3db9..a7e08208 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_rsa.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_rsa.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2006. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_rsa_asn1.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_rsa_asn1.cc index eb01b6f1..5b21467e 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_rsa_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_rsa_asn1.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2006. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. 
diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_x25519.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_x25519.cc index c3feef52..852f9244 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_x25519.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_x25519.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/evp/p_x25519_asn1.cc b/Sources/CCryptoBoringSSL/crypto/evp/p_x25519_asn1.cc index 62f49033..54bfc48c 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/p_x25519_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/p_x25519_asn1.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/evp/pbkdf.cc b/Sources/CCryptoBoringSSL/crypto/evp/pbkdf.cc index c5d41284..5e7781b6 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/pbkdf.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/pbkdf.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. 
diff --git a/Sources/CCryptoBoringSSL/crypto/evp/scrypt.cc b/Sources/CCryptoBoringSSL/crypto/evp/scrypt.cc index 39b0bb02..a850965d 100644 --- a/Sources/CCryptoBoringSSL/crypto/evp/scrypt.cc +++ b/Sources/CCryptoBoringSSL/crypto/evp/scrypt.cc @@ -140,7 +140,7 @@ static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T, // SCRYPT_MAX_MEM is the default maximum memory that may be allocated by // |EVP_PBE_scrypt|. -#define SCRYPT_MAX_MEM (1024 * 1024 * 32) +#define SCRYPT_MAX_MEM (1024 * 1024 * 65) int EVP_PBE_scrypt(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint64_t N, uint64_t r, diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/aes_nohw.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/aes_nohw.cc.inc index 59c4d72c..4770c26a 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/aes_nohw.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/aes_nohw.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/internal.h index 0ad8f8ff..28bdbf1e 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/aes/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. 
+/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm.cc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm.cc index 51f74d07..f522ff94 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm.cc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -86,6 +86,8 @@ #include "ecdsa/ecdsa.cc.inc" #include "hkdf/hkdf.cc.inc" #include "hmac/hmac.cc.inc" +#include "keccak/keccak.cc.inc" +#include "mldsa/mldsa.cc.inc" #include "modes/cbc.cc.inc" #include "modes/cfb.cc.inc" #include "modes/ctr.cc.inc" diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm_interface.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm_interface.h index e087a8a2..4f7974cd 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm_interface.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bcm_interface.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -34,23 +34,25 @@ extern "C" { // FIPS service indicator. For the moment, the official service indicator // remains the counter, not these values. Once we fully transition to // these return values from bcm we will change that. -enum bcm_infallible_t { - bcm_infallible_approved, - bcm_infallible_not_approved, +enum class bcm_infallible_t { + approved, + not_approved, }; -enum bcm_status_t { - bcm_status_approved, - bcm_status_not_approved, - - // Failure codes, which must all be negative. 
- bcm_status_failure, +enum class bcm_status_t { + approved, + not_approved, + failure, }; typedef enum bcm_status_t bcm_status; typedef enum bcm_infallible_t bcm_infallible; OPENSSL_INLINE int bcm_success(bcm_status status) { - return status == bcm_status_approved || status == bcm_status_not_approved; + return status == bcm_status::approved || status == bcm_status::not_approved; +} + +OPENSSL_INLINE bcm_status_t bcm_as_approved_status(int result) { + return result ? bcm_status::approved : bcm_status::failure; } @@ -237,6 +239,204 @@ bcm_infallible BCM_sha512_256_final(uint8_t out[BCM_SHA512_256_DIGEST_LENGTH], SHA512_CTX *sha); +// ML-DSA +// +// Where not commented, these functions have the same signature as the +// corresponding public function. + +// BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES is the number of bytes of uniformly +// random entropy necessary to generate a signature in randomized mode. +#define BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES 32 + +// BCM_MLDSA_SEED_BYTES is the number of bytes in an ML-DSA seed value. +#define BCM_MLDSA_SEED_BYTES 32 + +// BCM_MLDSA65_PRIVATE_KEY_BYTES is the number of bytes in an encoded ML-DSA-65 +// private key. +#define BCM_MLDSA65_PRIVATE_KEY_BYTES 4032 + +// BCM_MLDSA65_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-DSA-65 +// public key. +#define BCM_MLDSA65_PUBLIC_KEY_BYTES 1952 + +// BCM_MLDSA65_SIGNATURE_BYTES is the number of bytes in an encoded ML-DSA-65 +// signature. 
+#define BCM_MLDSA65_SIGNATURE_BYTES 3309 + +struct BCM_mldsa65_private_key { + union { + uint8_t bytes[32 + 32 + 64 + 256 * 4 * (5 + 6 + 6)]; + uint32_t alignment; + } opaque; +}; + +struct BCM_mldsa65_public_key { + union { + uint8_t bytes[32 + 64 + 256 * 4 * 6]; + uint32_t alignment; + } opaque; +}; + +OPENSSL_EXPORT bcm_status BCM_mldsa65_generate_key( + uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES], + uint8_t out_seed[BCM_MLDSA_SEED_BYTES], + struct BCM_mldsa65_private_key *out_private_key); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_private_key_from_seed( + struct BCM_mldsa65_private_key *out_private_key, + const uint8_t seed[BCM_MLDSA_SEED_BYTES]); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_public_from_private( + struct BCM_mldsa65_public_key *out_public_key, + const struct BCM_mldsa65_private_key *private_key); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_sign( + uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES], + const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context, size_t context_len); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_verify( + const struct BCM_mldsa65_public_key *public_key, + const uint8_t signature[BCM_MLDSA65_SIGNATURE_BYTES], const uint8_t *msg, + size_t msg_len, const uint8_t *context, size_t context_len); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_marshal_public_key( + CBB *out, const struct BCM_mldsa65_public_key *public_key); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_parse_public_key( + struct BCM_mldsa65_public_key *public_key, CBS *in); + +OPENSSL_EXPORT bcm_status BCM_mldsa65_parse_private_key( + struct BCM_mldsa65_private_key *private_key, CBS *in); + +// BCM_mldsa65_generate_key_external_entropy generates a public/private key pair +// using the given seed, writes the encoded public key to +// |out_encoded_public_key| and sets |out_private_key| to the private key. 
+OPENSSL_EXPORT bcm_status BCM_mldsa65_generate_key_external_entropy(
+    uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES],
+    struct BCM_mldsa65_private_key *out_private_key,
+    const uint8_t entropy[BCM_MLDSA_SEED_BYTES]);
+
+// BCM_mldsa65_sign_internal signs |msg| using |private_key| and writes the
+// signature to |out_encoded_signature|. The |context_prefix| and |context| are
+// prefixed to the message, in that order, before signing. The |randomizer|
+// value can be set to zero bytes in order to make a deterministic signature, or
+// else filled with entropy for the usual |MLDSA_sign| behavior.
+OPENSSL_EXPORT bcm_status BCM_mldsa65_sign_internal(
+    uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
+    const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg,
+    size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len,
+    const uint8_t *context, size_t context_len,
+    const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]);
+
+// BCM_mldsa65_verify_internal verifies that |encoded_signature| is a valid
+// signature of |msg| by |public_key|. The |context_prefix| and |context| are
+// prefixed to the message before verification, in that order.
+OPENSSL_EXPORT bcm_status BCM_mldsa65_verify_internal(
+    const struct BCM_mldsa65_public_key *public_key,
+    const uint8_t encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
+    const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix,
+    size_t context_prefix_len, const uint8_t *context, size_t context_len);
+
+// BCM_mldsa65_marshal_private_key serializes |private_key| to |out| in the
+// NIST format for ML-DSA-65 private keys.
+OPENSSL_EXPORT bcm_status BCM_mldsa65_marshal_private_key(
+    CBB *out, const struct BCM_mldsa65_private_key *private_key);
+
+
+// BCM_MLDSA87_PRIVATE_KEY_BYTES is the number of bytes in an encoded ML-DSA-87
+// private key.
+#define BCM_MLDSA87_PRIVATE_KEY_BYTES 4896 + +// BCM_MLDSA87_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-DSA-87 +// public key. +#define BCM_MLDSA87_PUBLIC_KEY_BYTES 2592 + +// BCM_MLDSA87_SIGNATURE_BYTES is the number of bytes in an encoded ML-DSA-87 +// signature. +#define BCM_MLDSA87_SIGNATURE_BYTES 4627 + +struct BCM_mldsa87_private_key { + union { + uint8_t bytes[32 + 32 + 64 + 256 * 4 * (7 + 8 + 8)]; + uint32_t alignment; + } opaque; +}; + +struct BCM_mldsa87_public_key { + union { + uint8_t bytes[32 + 64 + 256 * 4 * 8]; + uint32_t alignment; + } opaque; +}; + +OPENSSL_EXPORT bcm_status BCM_mldsa87_generate_key( + uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES], + uint8_t out_seed[BCM_MLDSA_SEED_BYTES], + struct BCM_mldsa87_private_key *out_private_key); + +OPENSSL_EXPORT bcm_status BCM_mldsa87_private_key_from_seed( + struct BCM_mldsa87_private_key *out_private_key, + const uint8_t seed[BCM_MLDSA_SEED_BYTES]); + +OPENSSL_EXPORT bcm_status BCM_mldsa87_public_from_private( + struct BCM_mldsa87_public_key *out_public_key, + const struct BCM_mldsa87_private_key *private_key); + +OPENSSL_EXPORT bcm_status BCM_mldsa87_sign( + uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], + const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context, size_t context_len); + +OPENSSL_EXPORT bcm_status +BCM_mldsa87_verify(const struct BCM_mldsa87_public_key *public_key, + const uint8_t *signature, const uint8_t *msg, size_t msg_len, + const uint8_t *context, size_t context_len); + +OPENSSL_EXPORT bcm_status BCM_mldsa87_marshal_public_key( + CBB *out, const struct BCM_mldsa87_public_key *public_key); + +OPENSSL_EXPORT bcm_status BCM_mldsa87_parse_public_key( + struct BCM_mldsa87_public_key *public_key, CBS *in); + +OPENSSL_EXPORT bcm_status BCM_mldsa87_parse_private_key( + struct BCM_mldsa87_private_key *private_key, CBS *in); + +// BCM_mldsa87_generate_key_external_entropy generates a 
public/private key pair +// using the given seed, writes the encoded public key to +// |out_encoded_public_key| and sets |out_private_key| to the private key. +OPENSSL_EXPORT bcm_status BCM_mldsa87_generate_key_external_entropy( + uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES], + struct BCM_mldsa87_private_key *out_private_key, + const uint8_t entropy[BCM_MLDSA_SEED_BYTES]); + +// BCM_mldsa87_sign_internal signs |msg| using |private_key| and writes the +// signature to |out_encoded_signature|. The |context_prefix| and |context| are +// prefixed to the message, in that order, before signing. The |randomizer| +// value can be set to zero bytes in order to make a deterministic signature, or +// else filled with entropy for the usual |MLDSA_sign| behavior. +OPENSSL_EXPORT bcm_status BCM_mldsa87_sign_internal( + uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], + const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, + const uint8_t *context, size_t context_len, + const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]); + +// BCM_mldsa87_verify_internal verifies that |encoded_signature| is a valid +// signature of |msg| by |public_key|. The |context_prefix| and |context| are +// prefixed to the message before verification, in that order. +OPENSSL_EXPORT bcm_status BCM_mldsa87_verify_internal( + const struct BCM_mldsa87_public_key *public_key, + const uint8_t encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], + const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, + size_t context_prefix_len, const uint8_t *context, size_t context_len); + +// BCM_mldsa87_marshal_private_key serializes |private_key| to |out| in the +// NIST format for ML-DSA-87 private keys. 
+OPENSSL_EXPORT bcm_status BCM_mldsa87_marshal_private_key( + CBB *out, const struct BCM_mldsa87_private_key *private_key); + + #if defined(__cplusplus) } // extern C #endif diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/div_extra.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/div_extra.cc.inc index 41c5dbdc..2ce08f9e 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/div_extra.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/div_extra.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/gcd_extra.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/gcd_extra.cc.inc index 60d0364b..b6b636f6 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/gcd_extra.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/bn/gcd_extra.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/cipher/aead.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/cipher/aead.cc.inc index 3cdc1788..7de2b545 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/cipher/aead.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/cipher/aead.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/delocate.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/delocate.h index fe67dbd3..56511347 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/delocate.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/delocate.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/dh/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/dh/internal.h index aa7ab771..c5c6dec0 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/dh/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/dh/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. 
+/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/digest/digest.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/digest/digest.cc.inc index adf73b10..70f5de7b 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/digest/digest.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/digest/digest.cc.inc @@ -266,24 +266,27 @@ int EVP_Digest(const void *data, size_t count, uint8_t *out_md, return ret; } - -const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx) { +const EVP_MD *EVP_MD_CTX_get0_md(const EVP_MD_CTX *ctx) { if (ctx == NULL) { return NULL; } return ctx->digest; } +const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx) { + return EVP_MD_CTX_get0_md(ctx); +} + size_t EVP_MD_CTX_size(const EVP_MD_CTX *ctx) { - return EVP_MD_size(EVP_MD_CTX_md(ctx)); + return EVP_MD_size(EVP_MD_CTX_get0_md(ctx)); } size_t EVP_MD_CTX_block_size(const EVP_MD_CTX *ctx) { - return EVP_MD_block_size(EVP_MD_CTX_md(ctx)); + return EVP_MD_block_size(EVP_MD_CTX_get0_md(ctx)); } int EVP_MD_CTX_type(const EVP_MD_CTX *ctx) { - return EVP_MD_type(EVP_MD_CTX_md(ctx)); + return EVP_MD_type(EVP_MD_CTX_get0_md(ctx)); } int EVP_add_digest(const EVP_MD *digest) { return 1; } diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/digestsign/digestsign.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/digestsign/digestsign.cc.inc index 87f6ad20..3cf564b8 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/digestsign/digestsign.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/digestsign/digestsign.cc.inc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2006. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2006. 
*/ /* ==================================================================== * Copyright (c) 2006,2007 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/builtin_curves.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/builtin_curves.h index 0b489ab5..61fd4f9d 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/builtin_curves.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/builtin_curves.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/felem.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/felem.cc.inc index 8212f9fa..50e8e088 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/felem.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/felem.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p224-64.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p224-64.cc.inc index 23fffbd2..cbbf3866 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p224-64.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p224-64.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. 
+/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256.cc.inc index 27bf8799..98167a00 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256_table.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256_table.h index d823d37f..04f8adb9 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256_table.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/p256_table.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/scalar.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/scalar.cc.inc index 7721b488..95d5ba0e 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/scalar.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/scalar.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. 
+/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/simple_mul.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/simple_mul.cc.inc index fbe53d33..06aa431f 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/simple_mul.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/simple_mul.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/util.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/util.cc.inc index 0b996fe2..be0fa1cf 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/util.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ec/util.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ecdsa/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ecdsa/internal.h index 519f6e18..2761714c 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/ecdsa/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/ecdsa/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Google Inc. 
+/* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/fips_shared_support.cc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/fips_shared_support.cc index 01de6a13..0f396e92 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/fips_shared_support.cc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/fips_shared_support.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/hkdf/hkdf.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/hkdf/hkdf.cc.inc index 5cf017cb..9f85c403 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/hkdf/hkdf.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/hkdf/hkdf.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/keccak/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/keccak/internal.h similarity index 98% rename from Sources/CCryptoBoringSSL/crypto/keccak/internal.h rename to Sources/CCryptoBoringSSL/crypto/fipsmodule/keccak/internal.h index 96da16d7..9cc7200d 100644 --- a/Sources/CCryptoBoringSSL/crypto/keccak/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/keccak/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. 
+/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/keccak/keccak.cc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/keccak/keccak.cc.inc similarity index 99% rename from Sources/CCryptoBoringSSL/crypto/keccak/keccak.cc rename to Sources/CCryptoBoringSSL/crypto/fipsmodule/keccak/keccak.cc.inc index d3dc7dff..d6b11536 100644 --- a/Sources/CCryptoBoringSSL/crypto/keccak/keccak.cc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/keccak/keccak.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,7 +17,7 @@ #include #include -#include "../internal.h" +#include "../../internal.h" #include "./internal.h" diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/mldsa/mldsa.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/mldsa/mldsa.cc.inc new file mode 100644 index 00000000..d67f6fef --- /dev/null +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/mldsa/mldsa.cc.inc @@ -0,0 +1,2031 @@ +/* Copyright 2014 The BoringSSL Authors + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include + +#include +#include + +#include +#include +#include + +#include "../../internal.h" +#include "../bcm_interface.h" +#include "../keccak/internal.h" + +namespace mldsa { +namespace { + +constexpr int kDegree = 256; +constexpr int kRhoBytes = 32; +constexpr int kSigmaBytes = 64; +constexpr int kKBytes = 32; +constexpr int kTrBytes = 64; +constexpr int kMuBytes = 64; +constexpr int kRhoPrimeBytes = 64; + +// 2^23 - 2^13 + 1 +constexpr uint32_t kPrime = 8380417; +// Inverse of -kPrime modulo 2^32 +constexpr uint32_t kPrimeNegInverse = 4236238847; +constexpr int kDroppedBits = 13; +constexpr uint32_t kHalfPrime = (kPrime - 1) / 2; +constexpr uint32_t kGamma2 = (kPrime - 1) / 32; +// 256^-1 mod kPrime, in Montgomery form. +constexpr uint32_t kInverseDegreeMontgomery = 41978; + +// Constants that vary depending on ML-DSA size. +// +// These are implemented as templates which take the K parameter to distinguish +// the ML-DSA sizes. (At the time of writing, `if constexpr` was not available.) +// +// TODO(crbug.com/42290600): Switch this to `if constexpr` when C++17 is +// available. 
+ +template +constexpr size_t public_key_bytes(); + +template <> +constexpr size_t public_key_bytes<6>() { + return BCM_MLDSA65_PUBLIC_KEY_BYTES; +} + +template <> +constexpr size_t public_key_bytes<8>() { + return BCM_MLDSA87_PUBLIC_KEY_BYTES; +} + +template +constexpr size_t signature_bytes(); + +template <> +constexpr size_t signature_bytes<6>() { + return BCM_MLDSA65_SIGNATURE_BYTES; +} + +template <> +constexpr size_t signature_bytes<8>() { + return BCM_MLDSA87_SIGNATURE_BYTES; +} + +template +constexpr int tau(); + +template <> +constexpr int tau<6>() { + return 49; +} + +template <> +constexpr int tau<8>() { + return 60; +} + +template +constexpr int lambda_bytes(); + +template <> +constexpr int lambda_bytes<6>() { + return 192 / 8; +} + +template <> +constexpr int lambda_bytes<8>() { + return 256 / 8; +} + +template +constexpr int gamma1(); + +template <> +constexpr int gamma1<6>() { + return 1 << 19; +} + +template <> +constexpr int gamma1<8>() { + return 1 << 19; +} + +template +constexpr int beta(); + +template <> +constexpr int beta<6>() { + return 196; +} + +template <> +constexpr int beta<8>() { + return 120; +} + +template +constexpr int omega(); + +template <> +constexpr int omega<6>() { + return 55; +} + +template <> +constexpr int omega<8>() { + return 75; +} + +template +constexpr int eta(); + +template <> +constexpr int eta<6>() { + return 4; +} + +template <> +constexpr int eta<8>() { + return 2; +} + +template +constexpr int plus_minus_eta_bitlen(); + +template <> +constexpr int plus_minus_eta_bitlen<6>() { + return 4; +} + +template <> +constexpr int plus_minus_eta_bitlen<8>() { + return 3; +} + +// Fundamental types. 
+ +typedef struct scalar { + uint32_t c[kDegree]; +} scalar; + +template +struct vector { + scalar v[K]; +}; + +template +struct matrix { + scalar v[K][L]; +}; + +/* Arithmetic */ + +// This bit of Python will be referenced in some of the following comments: +// +// q = 8380417 +// # Inverse of -q modulo 2^32 +// q_neg_inverse = 4236238847 +// # 2^64 modulo q +// montgomery_square = 2365951 +// +// def bitreverse(i): +// ret = 0 +// for n in range(8): +// bit = i & 1 +// ret <<= 1 +// ret |= bit +// i >>= 1 +// return ret +// +// def montgomery_reduce(x): +// a = (x * q_neg_inverse) % 2**32 +// b = x + a * q +// assert b & 0xFFFF_FFFF == 0 +// c = b >> 32 +// assert c < q +// return c +// +// def montgomery_transform(x): +// return montgomery_reduce(x * montgomery_square) + +// kNTTRootsMontgomery = [ +// montgomery_transform(pow(1753, bitreverse(i), q)) for i in range(256) +// ] +static const uint32_t kNTTRootsMontgomery[256] = { + 4193792, 25847, 5771523, 7861508, 237124, 7602457, 7504169, 466468, + 1826347, 2353451, 8021166, 6288512, 3119733, 5495562, 3111497, 2680103, + 2725464, 1024112, 7300517, 3585928, 7830929, 7260833, 2619752, 6271868, + 6262231, 4520680, 6980856, 5102745, 1757237, 8360995, 4010497, 280005, + 2706023, 95776, 3077325, 3530437, 6718724, 4788269, 5842901, 3915439, + 4519302, 5336701, 3574422, 5512770, 3539968, 8079950, 2348700, 7841118, + 6681150, 6736599, 3505694, 4558682, 3507263, 6239768, 6779997, 3699596, + 811944, 531354, 954230, 3881043, 3900724, 5823537, 2071892, 5582638, + 4450022, 6851714, 4702672, 5339162, 6927966, 3475950, 2176455, 6795196, + 7122806, 1939314, 4296819, 7380215, 5190273, 5223087, 4747489, 126922, + 3412210, 7396998, 2147896, 2715295, 5412772, 4686924, 7969390, 5903370, + 7709315, 7151892, 8357436, 7072248, 7998430, 1349076, 1852771, 6949987, + 5037034, 264944, 508951, 3097992, 44288, 7280319, 904516, 3958618, + 4656075, 8371839, 1653064, 5130689, 2389356, 8169440, 759969, 7063561, + 189548, 4827145, 3159746, 
6529015, 5971092, 8202977, 1315589, 1341330, + 1285669, 6795489, 7567685, 6940675, 5361315, 4499357, 4751448, 3839961, + 2091667, 3407706, 2316500, 3817976, 5037939, 2244091, 5933984, 4817955, + 266997, 2434439, 7144689, 3513181, 4860065, 4621053, 7183191, 5187039, + 900702, 1859098, 909542, 819034, 495491, 6767243, 8337157, 7857917, + 7725090, 5257975, 2031748, 3207046, 4823422, 7855319, 7611795, 4784579, + 342297, 286988, 5942594, 4108315, 3437287, 5038140, 1735879, 203044, + 2842341, 2691481, 5790267, 1265009, 4055324, 1247620, 2486353, 1595974, + 4613401, 1250494, 2635921, 4832145, 5386378, 1869119, 1903435, 7329447, + 7047359, 1237275, 5062207, 6950192, 7929317, 1312455, 3306115, 6417775, + 7100756, 1917081, 5834105, 7005614, 1500165, 777191, 2235880, 3406031, + 7838005, 5548557, 6709241, 6533464, 5796124, 4656147, 594136, 4603424, + 6366809, 2432395, 2454455, 8215696, 1957272, 3369112, 185531, 7173032, + 5196991, 162844, 1616392, 3014001, 810149, 1652634, 4686184, 6581310, + 5341501, 3523897, 3866901, 269760, 2213111, 7404533, 1717735, 472078, + 7953734, 1723600, 6577327, 1910376, 6712985, 7276084, 8119771, 4546524, + 5441381, 6144432, 7959518, 6094090, 183443, 7403526, 1612842, 4834730, + 7826001, 3919660, 8332111, 7018208, 3937738, 1400424, 7534263, 1976782}; + +// Reduces x mod kPrime in constant time, where 0 <= x < 2*kPrime. +uint32_t reduce_once(uint32_t x) { + declassify_assert(x < 2 * kPrime); + // return x < kPrime ? x : x - kPrime; + return constant_time_select_int(constant_time_lt_w(x, kPrime), x, x - kPrime); +} + +// Returns the absolute value in constant time. +uint32_t abs_signed(uint32_t x) { + // return is_positive(x) ? x : -x; + // Note: MSVC doesn't like applying the unary minus operator to unsigned types + // (warning C4146), so we write the negation as a bitwise not plus one + // (assuming two's complement representation). 
+ return constant_time_select_int(constant_time_lt_w(x, 0x80000000), x, 0u - x); +} + +// Returns the absolute value modulo kPrime. +uint32_t abs_mod_prime(uint32_t x) { + declassify_assert(x < kPrime); + // return x > kHalfPrime ? kPrime - x : x; + return constant_time_select_int(constant_time_lt_w(kHalfPrime, x), kPrime - x, + x); +} + +// Returns the maximum of two values in constant time. +uint32_t maximum(uint32_t x, uint32_t y) { + // return x < y ? y : x; + return constant_time_select_int(constant_time_lt_w(x, y), y, x); +} + +uint32_t mod_sub(uint32_t a, uint32_t b) { + declassify_assert(a < kPrime); + declassify_assert(b < kPrime); + return reduce_once(kPrime + a - b); +} + +void scalar_add(scalar *out, const scalar *lhs, const scalar *rhs) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = reduce_once(lhs->c[i] + rhs->c[i]); + } +} + +void scalar_sub(scalar *out, const scalar *lhs, const scalar *rhs) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = mod_sub(lhs->c[i], rhs->c[i]); + } +} + +uint32_t reduce_montgomery(uint64_t x) { + declassify_assert(x <= ((uint64_t)kPrime << 32)); + uint64_t a = (uint32_t)x * kPrimeNegInverse; + uint64_t b = x + a * kPrime; + declassify_assert((b & 0xffffffff) == 0); + uint32_t c = b >> 32; + return reduce_once(c); +} + +// Multiply two scalars in the number theoretically transformed state. +void scalar_mult(scalar *out, const scalar *lhs, const scalar *rhs) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = reduce_montgomery((uint64_t)lhs->c[i] * (uint64_t)rhs->c[i]); + } +} + +// In place number theoretic transform of a given scalar. +// +// FIPS 204, Algorithm 41 (`NTT`). 
+static void scalar_ntt(scalar *s) { + // Step: 1, 2, 4, 8, ..., 128 + // Offset: 128, 64, 32, 16, ..., 1 + int offset = kDegree; + for (int step = 1; step < kDegree; step <<= 1) { + offset >>= 1; + int k = 0; + for (int i = 0; i < step; i++) { + assert(k == 2 * offset * i); + const uint32_t step_root = kNTTRootsMontgomery[step + i]; + for (int j = k; j < k + offset; j++) { + uint32_t even = s->c[j]; + // |reduce_montgomery| works on values up to kPrime*R and R > 2*kPrime. + // |step_root| < kPrime because it's static data. |s->c[...]| is < + // kPrime by the invariants of that struct. + uint32_t odd = + reduce_montgomery((uint64_t)step_root * (uint64_t)s->c[j + offset]); + s->c[j] = reduce_once(odd + even); + s->c[j + offset] = mod_sub(even, odd); + } + k += 2 * offset; + } + } +} + +// In place inverse number theoretic transform of a given scalar. +// +// FIPS 204, Algorithm 42 (`NTT^-1`). +void scalar_inverse_ntt(scalar *s) { + // Step: 128, 64, 32, 16, ..., 1 + // Offset: 1, 2, 4, 8, ..., 128 + int step = kDegree; + for (int offset = 1; offset < kDegree; offset <<= 1) { + step >>= 1; + int k = 0; + for (int i = 0; i < step; i++) { + assert(k == 2 * offset * i); + const uint32_t step_root = + kPrime - kNTTRootsMontgomery[step + (step - 1 - i)]; + for (int j = k; j < k + offset; j++) { + uint32_t even = s->c[j]; + uint32_t odd = s->c[j + offset]; + s->c[j] = reduce_once(odd + even); + + // |reduce_montgomery| works on values up to kPrime*R and R > 2*kPrime. + // kPrime + even < 2*kPrime because |even| < kPrime, by the invariants + // of that structure. Thus kPrime + even - odd < 2*kPrime because odd >= + // 0, because it's unsigned and less than kPrime. Lastly step_root < + // kPrime, because |kNTTRootsMontgomery| is static data. 
+ s->c[j + offset] = reduce_montgomery((uint64_t)step_root * + (uint64_t)(kPrime + even - odd)); + } + k += 2 * offset; + } + } + for (int i = 0; i < kDegree; i++) { + s->c[i] = reduce_montgomery((uint64_t)s->c[i] * + (uint64_t)kInverseDegreeMontgomery); + } +} + +template +void vector_zero(vector *out) { + OPENSSL_memset(out, 0, sizeof(*out)); +} + +template +void vector_add(vector *out, const vector *lhs, const vector *rhs) { + for (int i = 0; i < X; i++) { + scalar_add(&out->v[i], &lhs->v[i], &rhs->v[i]); + } +} + +template +void vector_sub(vector *out, const vector *lhs, const vector *rhs) { + for (int i = 0; i < X; i++) { + scalar_sub(&out->v[i], &lhs->v[i], &rhs->v[i]); + } +} + +template +void vector_mult_scalar(vector *out, const vector *lhs, + const scalar *rhs) { + for (int i = 0; i < X; i++) { + scalar_mult(&out->v[i], &lhs->v[i], rhs); + } +} + +template +void vector_ntt(vector *a) { + for (int i = 0; i < X; i++) { + scalar_ntt(&a->v[i]); + } +} + +template +void vector_inverse_ntt(vector *a) { + for (int i = 0; i < X; i++) { + scalar_inverse_ntt(&a->v[i]); + } +} + +template +void matrix_mult(vector *out, const matrix *m, const vector *a) { + vector_zero(out); + for (int i = 0; i < K; i++) { + for (int j = 0; j < L; j++) { + scalar product; + scalar_mult(&product, &m->v[i][j], &a->v[j]); + scalar_add(&out->v[i], &out->v[i], &product); + } + } +} + +/* Rounding & hints */ + +// FIPS 204, Algorithm 35 (`Power2Round`). +void power2_round(uint32_t *r1, uint32_t *r0, uint32_t r) { + *r1 = r >> kDroppedBits; + *r0 = r - (*r1 << kDroppedBits); + + uint32_t r0_adjusted = mod_sub(*r0, 1 << kDroppedBits); + uint32_t r1_adjusted = *r1 + 1; + + // Mask is set iff r0 > 2^(dropped_bits - 1). + crypto_word_t mask = + constant_time_lt_w((uint32_t)(1 << (kDroppedBits - 1)), *r0); + // r0 = mask ? r0_adjusted : r0 + *r0 = constant_time_select_int(mask, r0_adjusted, *r0); + // r1 = mask ? 
r1_adjusted : r1 + *r1 = constant_time_select_int(mask, r1_adjusted, *r1); +} + +// Scale back previously rounded value. +void scale_power2_round(uint32_t *out, uint32_t r1) { + // Pre-condition: 0 <= r1 <= 2^10 - 1 + assert(r1 < (1u << 10)); + + *out = r1 << kDroppedBits; + + // Post-condition: 0 <= out <= 2^23 - 2^13 = kPrime - 1 + assert(*out < kPrime); +} + +// FIPS 204, Algorithm 37 (`HighBits`). +uint32_t high_bits(uint32_t x) { + // Reference description (given 0 <= x < q): + // + // ``` + // int32_t r0 = x mod+- (2 * kGamma2); + // if (x - r0 == q - 1) { + // return 0; + // } else { + // return (x - r0) / (2 * kGamma2); + // } + // ``` + // + // Below is the formula taken from the reference implementation. + // + // Here, kGamma2 == 2^18 - 2^8 + // This returns ((ceil(x / 2^7) * (2^10 + 1) + 2^21) / 2^22) mod 2^4 + uint32_t r1 = (x + 127) >> 7; + r1 = (r1 * 1025 + (1 << 21)) >> 22; + r1 &= 15; + return r1; +} + +// FIPS 204, Algorithm 36 (`Decompose`). +void decompose(uint32_t *r1, int32_t *r0, uint32_t r) { + *r1 = high_bits(r); + + *r0 = r; + *r0 -= *r1 * 2 * (int32_t)kGamma2; + *r0 -= (((int32_t)kHalfPrime - *r0) >> 31) & (int32_t)kPrime; +} + +// FIPS 204, Algorithm 38 (`LowBits`). +int32_t low_bits(uint32_t x) { + uint32_t r1; + int32_t r0; + decompose(&r1, &r0, x); + return r0; +} + +// FIPS 204, Algorithm 39 (`MakeHint`). +// +// In the spec this takes two arguments, z and r, and is called with +// z = -ct0 +// r = w - cs2 + ct0 +// +// It then computes HighBits (algorithm 37) of z and z+r. But z+r is just w - +// cs2, so this takes three arguments and saves an addition. +int32_t make_hint(uint32_t ct0, uint32_t cs2, uint32_t w) { + uint32_t r_plus_z = mod_sub(w, cs2); + uint32_t r = reduce_once(r_plus_z + ct0); + return high_bits(r) != high_bits(r_plus_z); +} + +// FIPS 204, Algorithm 40 (`UseHint`). 
+uint32_t use_hint_vartime(uint32_t h, uint32_t r) { + uint32_t r1; + int32_t r0; + decompose(&r1, &r0, r); + + if (h) { + if (r0 > 0) { + // m = 16, thus |mod m| in the spec turns into |& 15|. + return (r1 + 1) & 15; + } else { + return (r1 - 1) & 15; + } + } + return r1; +} + +void scalar_power2_round(scalar *s1, scalar *s0, const scalar *s) { + for (int i = 0; i < kDegree; i++) { + power2_round(&s1->c[i], &s0->c[i], s->c[i]); + } +} + +void scalar_scale_power2_round(scalar *out, const scalar *in) { + for (int i = 0; i < kDegree; i++) { + scale_power2_round(&out->c[i], in->c[i]); + } +} + +void scalar_high_bits(scalar *out, const scalar *in) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = high_bits(in->c[i]); + } +} + +void scalar_low_bits(scalar *out, const scalar *in) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = low_bits(in->c[i]); + } +} + +void scalar_max(uint32_t *max, const scalar *s) { + for (int i = 0; i < kDegree; i++) { + uint32_t abs = abs_mod_prime(s->c[i]); + *max = maximum(*max, abs); + } +} + +void scalar_max_signed(uint32_t *max, const scalar *s) { + for (int i = 0; i < kDegree; i++) { + uint32_t abs = abs_signed(s->c[i]); + *max = maximum(*max, abs); + } +} + +void scalar_make_hint(scalar *out, const scalar *ct0, const scalar *cs2, + const scalar *w) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = make_hint(ct0->c[i], cs2->c[i], w->c[i]); + } +} + +void scalar_use_hint_vartime(scalar *out, const scalar *h, const scalar *r) { + for (int i = 0; i < kDegree; i++) { + out->c[i] = use_hint_vartime(h->c[i], r->c[i]); + } +} + +template +void vector_power2_round(vector *t1, vector *t0, const vector *t) { + for (int i = 0; i < X; i++) { + scalar_power2_round(&t1->v[i], &t0->v[i], &t->v[i]); + } +} + +template +void vector_scale_power2_round(vector *out, const vector *in) { + for (int i = 0; i < X; i++) { + scalar_scale_power2_round(&out->v[i], &in->v[i]); + } +} + +template +void vector_high_bits(vector *out, const vector *in) { + 
for (int i = 0; i < X; i++) { + scalar_high_bits(&out->v[i], &in->v[i]); + } +} + +template +void vector_low_bits(vector *out, const vector *in) { + for (int i = 0; i < X; i++) { + scalar_low_bits(&out->v[i], &in->v[i]); + } +} + +template +uint32_t vector_max(const vector *a) { + uint32_t max = 0; + for (int i = 0; i < X; i++) { + scalar_max(&max, &a->v[i]); + } + return max; +} + +template +uint32_t vector_max_signed(const vector *a) { + uint32_t max = 0; + for (int i = 0; i < X; i++) { + scalar_max_signed(&max, &a->v[i]); + } + return max; +} + +// The input vector contains only zeroes and ones. +template +size_t vector_count_ones(const vector *a) { + size_t count = 0; + for (int i = 0; i < X; i++) { + for (int j = 0; j < kDegree; j++) { + count += a->v[i].c[j]; + } + } + return count; +} + +template +void vector_make_hint(vector *out, const vector *ct0, + const vector *cs2, const vector *w) { + for (int i = 0; i < X; i++) { + scalar_make_hint(&out->v[i], &ct0->v[i], &cs2->v[i], &w->v[i]); + } +} + +template +void vector_use_hint_vartime(vector *out, const vector *h, + const vector *r) { + for (int i = 0; i < X; i++) { + scalar_use_hint_vartime(&out->v[i], &h->v[i], &r->v[i]); + } +} + +/* Bit packing */ + +// FIPS 204, Algorithm 16 (`SimpleBitPack`). Specialized to bitlen(b) = 4. +static void scalar_encode_4(uint8_t out[128], const scalar *s) { + // Every two elements lands on a byte boundary. + static_assert(kDegree % 2 == 0, "kDegree must be a multiple of 2"); + for (int i = 0; i < kDegree / 2; i++) { + uint32_t a = s->c[2 * i]; + uint32_t b = s->c[2 * i + 1]; + declassify_assert(a < 16); + declassify_assert(b < 16); + out[i] = a | (b << 4); + } +} + +// FIPS 204, Algorithm 16 (`SimpleBitPack`). Specialized to bitlen(b) = 10. +void scalar_encode_10(uint8_t out[320], const scalar *s) { + // Every four elements lands on a byte boundary. 
+ static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); + for (int i = 0; i < kDegree / 4; i++) { + uint32_t a = s->c[4 * i]; + uint32_t b = s->c[4 * i + 1]; + uint32_t c = s->c[4 * i + 2]; + uint32_t d = s->c[4 * i + 3]; + declassify_assert(a < 1024); + declassify_assert(b < 1024); + declassify_assert(c < 1024); + declassify_assert(d < 1024); + out[5 * i] = (uint8_t)a; + out[5 * i + 1] = (uint8_t)((a >> 8) | (b << 2)); + out[5 * i + 2] = (uint8_t)((b >> 6) | (c << 4)); + out[5 * i + 3] = (uint8_t)((c >> 4) | (d << 6)); + out[5 * i + 4] = (uint8_t)(d >> 2); + } +} + +// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 4 and b = 4. +void scalar_encode_signed_4_4(uint8_t out[128], const scalar *s) { + // Every two elements lands on a byte boundary. + static_assert(kDegree % 2 == 0, "kDegree must be a multiple of 2"); + for (int i = 0; i < kDegree / 2; i++) { + uint32_t a = mod_sub(4, s->c[2 * i]); + uint32_t b = mod_sub(4, s->c[2 * i + 1]); + declassify_assert(a < 16); + declassify_assert(b < 16); + out[i] = a | (b << 4); + } +} + +// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 3 and b = 2. +static void scalar_encode_signed_3_2(uint8_t out[96], const scalar *s) { + static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); + for (int i = 0; i < kDegree / 8; i++) { + uint32_t a = mod_sub(2, s->c[8 * i]); + uint32_t b = mod_sub(2, s->c[8 * i + 1]); + uint32_t c = mod_sub(2, s->c[8 * i + 2]); + uint32_t d = mod_sub(2, s->c[8 * i + 3]); + uint32_t e = mod_sub(2, s->c[8 * i + 4]); + uint32_t f = mod_sub(2, s->c[8 * i + 5]); + uint32_t g = mod_sub(2, s->c[8 * i + 6]); + uint32_t h = mod_sub(2, s->c[8 * i + 7]); + uint32_t v = (h << 21) | (g << 18) | (f << 15) | (e << 12) | (d << 9) | + (c << 6) | (b << 3) | a; + uint8_t v_bytes[sizeof(v)]; + CRYPTO_store_u32_le(v_bytes, v); + OPENSSL_memcpy(&out[i * 3], v_bytes, 3); + } +} + +// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 13 and b = +// 2^12. 
+void scalar_encode_signed_13_12(uint8_t out[416], const scalar *s) { + static const uint32_t kMax = 1u << 12; + // Every two elements lands on a byte boundary. + static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); + for (int i = 0; i < kDegree / 8; i++) { + uint32_t a = mod_sub(kMax, s->c[8 * i]); + uint32_t b = mod_sub(kMax, s->c[8 * i + 1]); + uint32_t c = mod_sub(kMax, s->c[8 * i + 2]); + uint32_t d = mod_sub(kMax, s->c[8 * i + 3]); + uint32_t e = mod_sub(kMax, s->c[8 * i + 4]); + uint32_t f = mod_sub(kMax, s->c[8 * i + 5]); + uint32_t g = mod_sub(kMax, s->c[8 * i + 6]); + uint32_t h = mod_sub(kMax, s->c[8 * i + 7]); + declassify_assert(a < (1u << 13)); + declassify_assert(b < (1u << 13)); + declassify_assert(c < (1u << 13)); + declassify_assert(d < (1u << 13)); + declassify_assert(e < (1u << 13)); + declassify_assert(f < (1u << 13)); + declassify_assert(g < (1u << 13)); + declassify_assert(h < (1u << 13)); + a |= b << 13; + a |= c << 26; + c >>= 6; + c |= d << 7; + c |= e << 20; + e >>= 12; + e |= f << 1; + e |= g << 14; + e |= h << 27; + h >>= 5; + OPENSSL_memcpy(&out[13 * i], &a, sizeof(a)); + OPENSSL_memcpy(&out[13 * i + 4], &c, sizeof(c)); + OPENSSL_memcpy(&out[13 * i + 8], &e, sizeof(e)); + OPENSSL_memcpy(&out[13 * i + 12], &h, 1); + } +} + +// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 20 and b = +// 2^19. +void scalar_encode_signed_20_19(uint8_t out[640], const scalar *s) { + static const uint32_t kMax = 1u << 19; + // Every two elements lands on a byte boundary. 
+ static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); + for (int i = 0; i < kDegree / 4; i++) { + uint32_t a = mod_sub(kMax, s->c[4 * i]); + uint32_t b = mod_sub(kMax, s->c[4 * i + 1]); + uint32_t c = mod_sub(kMax, s->c[4 * i + 2]); + uint32_t d = mod_sub(kMax, s->c[4 * i + 3]); + declassify_assert(a < (1u << 20)); + declassify_assert(b < (1u << 20)); + declassify_assert(c < (1u << 20)); + declassify_assert(d < (1u << 20)); + a |= b << 20; + b >>= 12; + b |= c << 8; + b |= d << 28; + d >>= 4; + OPENSSL_memcpy(&out[10 * i], &a, sizeof(a)); + OPENSSL_memcpy(&out[10 * i + 4], &b, sizeof(b)); + OPENSSL_memcpy(&out[10 * i + 8], &d, 2); + } +} + +// FIPS 204, Algorithm 17 (`BitPack`). +void scalar_encode_signed(uint8_t *out, const scalar *s, int bits, + uint32_t max) { + if (bits == 3) { + assert(max == 2); + scalar_encode_signed_3_2(out, s); + } else if (bits == 4) { + assert(max == 4); + scalar_encode_signed_4_4(out, s); + } else if (bits == 20) { + assert(max == 1u << 19); + scalar_encode_signed_20_19(out, s); + } else { + assert(bits == 13); + assert(max == 1u << 12); + scalar_encode_signed_13_12(out, s); + } +} + +// FIPS 204, Algorithm 18 (`SimpleBitUnpack`). Specialized for bitlen(b) == 10. +void scalar_decode_10(scalar *out, const uint8_t in[320]) { + uint32_t v; + static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); + for (int i = 0; i < kDegree / 4; i++) { + OPENSSL_memcpy(&v, &in[5 * i], sizeof(v)); + out->c[4 * i] = v & 0x3ff; + out->c[4 * i + 1] = (v >> 10) & 0x3ff; + out->c[4 * i + 2] = (v >> 20) & 0x3ff; + out->c[4 * i + 3] = (v >> 30) | (((uint32_t)in[5 * i + 4]) << 2); + } +} + +// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 4 and b = +// 4. 
+int scalar_decode_signed_4_4(scalar *out, const uint8_t in[128]) { + uint32_t v; + static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); + for (int i = 0; i < kDegree / 8; i++) { + OPENSSL_memcpy(&v, &in[4 * i], sizeof(v)); + // None of the nibbles may be >= 9. So if the MSB of any nibble is set, none + // of the other bits may be set. First, select all the MSBs. + const uint32_t msbs = v & 0x88888888u; + // For each nibble where the MSB is set, form a mask of all the other bits. + const uint32_t mask = (msbs >> 1) | (msbs >> 2) | (msbs >> 3); + // A nibble is only out of range in the case of invalid input, in which case + // it is okay to leak the value. + if (constant_time_declassify_int((mask & v) != 0)) { + return 0; + } + + out->c[i * 8] = mod_sub(4, v & 15); + out->c[i * 8 + 1] = mod_sub(4, (v >> 4) & 15); + out->c[i * 8 + 2] = mod_sub(4, (v >> 8) & 15); + out->c[i * 8 + 3] = mod_sub(4, (v >> 12) & 15); + out->c[i * 8 + 4] = mod_sub(4, (v >> 16) & 15); + out->c[i * 8 + 5] = mod_sub(4, (v >> 20) & 15); + out->c[i * 8 + 6] = mod_sub(4, (v >> 24) & 15); + out->c[i * 8 + 7] = mod_sub(4, v >> 28); + } + return 1; +} + +// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 3 and b = +// 2. +static int scalar_decode_signed_3_2(scalar *out, const uint8_t in[96]) { + uint32_t v; + uint8_t v_bytes[sizeof(v)] = {0}; + static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); + for (int i = 0; i < kDegree / 8; i++) { + OPENSSL_memcpy(v_bytes, &in[3 * i], 3); + v = CRYPTO_load_u32_le(v_bytes); + // v contains 8, 3-bit values in the lower 24 bits. None of the values may + // be >= 5. So if the MSB of any triple is set, none of the other bits may + // be set. First, select all the MSBs. + const uint32_t msbs = v & 000044444444u; + // For each triple where the MSB is set, form a mask of all the other bits. 
+ const uint32_t mask = (msbs >> 1) | (msbs >> 2); + // A triple is only out of range in the case of invalid input, in which case + // it is okay to leak the value. + if (constant_time_declassify_int((mask & v) != 0)) { + return 0; + } + + out->c[i * 8 + 0] = mod_sub(2, (v >> 0) & 7); + out->c[i * 8 + 1] = mod_sub(2, (v >> 3) & 7); + out->c[i * 8 + 2] = mod_sub(2, (v >> 6) & 7); + out->c[i * 8 + 3] = mod_sub(2, (v >> 9) & 7); + out->c[i * 8 + 4] = mod_sub(2, (v >> 12) & 7); + out->c[i * 8 + 5] = mod_sub(2, (v >> 15) & 7); + out->c[i * 8 + 6] = mod_sub(2, (v >> 18) & 7); + out->c[i * 8 + 7] = mod_sub(2, v >> 21); + } + return 1; +} + +// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 13 and b = +// 2^12. +void scalar_decode_signed_13_12(scalar *out, const uint8_t in[416]) { + static const uint32_t kMax = 1u << 12; + static const uint32_t k13Bits = (1u << 13) - 1; + static const uint32_t k7Bits = (1u << 7) - 1; + + uint32_t a, b, c; + uint8_t d; + static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); + for (int i = 0; i < kDegree / 8; i++) { + OPENSSL_memcpy(&a, &in[13 * i], sizeof(a)); + OPENSSL_memcpy(&b, &in[13 * i + 4], sizeof(b)); + OPENSSL_memcpy(&c, &in[13 * i + 8], sizeof(c)); + d = in[13 * i + 12]; + + // It's not possible for a 13-bit number to be out of range when the max is + // 2^12. + out->c[i * 8] = mod_sub(kMax, a & k13Bits); + out->c[i * 8 + 1] = mod_sub(kMax, (a >> 13) & k13Bits); + out->c[i * 8 + 2] = mod_sub(kMax, (a >> 26) | ((b & k7Bits) << 6)); + out->c[i * 8 + 3] = mod_sub(kMax, (b >> 7) & k13Bits); + out->c[i * 8 + 4] = mod_sub(kMax, (b >> 20) | ((c & 1) << 12)); + out->c[i * 8 + 5] = mod_sub(kMax, (c >> 1) & k13Bits); + out->c[i * 8 + 6] = mod_sub(kMax, (c >> 14) & k13Bits); + out->c[i * 8 + 7] = mod_sub(kMax, (c >> 27) | ((uint32_t)d) << 5); + } +} + +// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 20 and b = +// 2^19. 
+void scalar_decode_signed_20_19(scalar *out, const uint8_t in[640]) { + static const uint32_t kMax = 1u << 19; + static const uint32_t k20Bits = (1u << 20) - 1; + + uint32_t a, b; + uint16_t c; + static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); + for (int i = 0; i < kDegree / 4; i++) { + OPENSSL_memcpy(&a, &in[10 * i], sizeof(a)); + OPENSSL_memcpy(&b, &in[10 * i + 4], sizeof(b)); + OPENSSL_memcpy(&c, &in[10 * i + 8], sizeof(c)); + + // It's not possible for a 20-bit number to be out of range when the max is + // 2^19. + out->c[i * 4] = mod_sub(kMax, a & k20Bits); + out->c[i * 4 + 1] = mod_sub(kMax, (a >> 20) | ((b & 0xff) << 12)); + out->c[i * 4 + 2] = mod_sub(kMax, (b >> 8) & k20Bits); + out->c[i * 4 + 3] = mod_sub(kMax, (b >> 28) | ((uint32_t)c) << 4); + } +} + +// FIPS 204, Algorithm 19 (`BitUnpack`). +int scalar_decode_signed(scalar *out, const uint8_t *in, int bits, + uint32_t max) { + if (bits == 3) { + assert(max == 2); + return scalar_decode_signed_3_2(out, in); + } else if (bits == 4) { + assert(max == 4); + return scalar_decode_signed_4_4(out, in); + } else if (bits == 13) { + assert(max == (1u << 12)); + scalar_decode_signed_13_12(out, in); + return 1; + } else if (bits == 20) { + assert(max == (1u << 19)); + scalar_decode_signed_20_19(out, in); + return 1; + } else { + abort(); + } +} + +/* Expansion functions */ + +// FIPS 204, Algorithm 30 (`RejNTTPoly`). +// +// Rejection samples a Keccak stream to get uniformly distributed elements. This +// is used for matrix expansion and only operates on public inputs. 
+void scalar_from_keccak_vartime(scalar *out, + const uint8_t derived_seed[kRhoBytes + 2]) { + struct BORINGSSL_keccak_st keccak_ctx; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake128); + BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, kRhoBytes + 2); + assert(keccak_ctx.squeeze_offset == 0); + assert(keccak_ctx.rate_bytes == 168); + static_assert(168 % 3 == 0, "block and coefficient boundaries do not align"); + + int done = 0; + while (done < kDegree) { + uint8_t block[168]; + BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); + for (size_t i = 0; i < sizeof(block) && done < kDegree; i += 3) { + // FIPS 204, Algorithm 14 (`CoeffFromThreeBytes`). + uint32_t value = (uint32_t)block[i] | ((uint32_t)block[i + 1] << 8) | + (((uint32_t)block[i + 2] & 0x7f) << 16); + if (value < kPrime) { + out->c[done++] = value; + } + } + } +} + +template +static bool coefficient_from_nibble(uint32_t nibble, uint32_t *result); + +template <> +bool coefficient_from_nibble<4>(uint32_t nibble, uint32_t *result) { + if (constant_time_declassify_int(nibble < 9)) { + *result = mod_sub(4, nibble); + return true; + } + return false; +} + +template <> +bool coefficient_from_nibble<2>(uint32_t nibble, uint32_t *result) { + if (constant_time_declassify_int(nibble < 15)) { + *result = mod_sub(2, nibble % 5); + return true; + } + return false; +} + +// FIPS 204, Algorithm 31 (`RejBoundedPoly`). 
+template +void scalar_uniform(scalar *out, const uint8_t derived_seed[kSigmaBytes + 2]) { + struct BORINGSSL_keccak_st keccak_ctx; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, kSigmaBytes + 2); + assert(keccak_ctx.squeeze_offset == 0); + assert(keccak_ctx.rate_bytes == 136); + + int done = 0; + while (done < kDegree) { + uint8_t block[136]; + BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); + for (size_t i = 0; i < sizeof(block) && done < kDegree; ++i) { + uint32_t t0 = block[i] & 0x0F; + uint32_t t1 = block[i] >> 4; + // FIPS 204, Algorithm 15 (`CoefFromHalfByte`). Although both the input + // and output here are secret, it is OK to leak when we rejected a byte. + // Individual bytes of the SHAKE-256 stream are (indistiguishable from) + // independent of each other and the original seed, so leaking information + // about the rejected bytes does not reveal the input or output. + uint32_t v; + if (coefficient_from_nibble(t0, &v)) { + out->c[done++] = v; + } + if (done < kDegree && coefficient_from_nibble(t1, &v)) { + out->c[done++] = v; + } + } + } +} + +// FIPS 204, Algorithm 34 (`ExpandMask`), but just a single step. +void scalar_sample_mask(scalar *out, + const uint8_t derived_seed[kRhoPrimeBytes + 2]) { + uint8_t buf[640]; + BORINGSSL_keccak(buf, sizeof(buf), derived_seed, kRhoPrimeBytes + 2, + boringssl_shake256); + + scalar_decode_signed_20_19(out, buf); +} + +// FIPS 204, Algorithm 29 (`SampleInBall`). 
+void scalar_sample_in_ball_vartime(scalar *out, const uint8_t *seed, int len, + int tau) { + struct BORINGSSL_keccak_st keccak_ctx; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, seed, len); + assert(keccak_ctx.squeeze_offset == 0); + assert(keccak_ctx.rate_bytes == 136); + + uint8_t block[136]; + BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); + + uint64_t signs = CRYPTO_load_u64_le(block); + int offset = 8; + // SampleInBall implements a Fisher–Yates shuffle, which unavoidably leaks + // where the zeros are by memory access pattern. Although this leak happens + // before bad signatures are rejected, this is safe. See + // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/8d8f01ac_70af3f21/ + CONSTTIME_DECLASSIFY(block + offset, sizeof(block) - offset); + + OPENSSL_memset(out, 0, sizeof(*out)); + for (size_t i = kDegree - tau; i < kDegree; i++) { + size_t byte; + for (;;) { + if (offset == 136) { + BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); + // See above. + CONSTTIME_DECLASSIFY(block, sizeof(block)); + offset = 0; + } + + byte = block[offset++]; + if (byte <= i) { + break; + } + } + + out->c[i] = out->c[byte]; + out->c[byte] = mod_sub(1, 2 * (signs & 1)); + signs >>= 1; + } +} + +// FIPS 204, Algorithm 32 (`ExpandA`). +template +void matrix_expand(matrix *out, const uint8_t rho[kRhoBytes]) { + static_assert(K <= 0x100, "K must fit in 8 bits"); + static_assert(L <= 0x100, "L must fit in 8 bits"); + + uint8_t derived_seed[kRhoBytes + 2]; + OPENSSL_memcpy(derived_seed, rho, kRhoBytes); + for (int i = 0; i < K; i++) { + for (int j = 0; j < L; j++) { + derived_seed[kRhoBytes + 1] = (uint8_t)i; + derived_seed[kRhoBytes] = (uint8_t)j; + scalar_from_keccak_vartime(&out->v[i][j], derived_seed); + } + } +} + +// FIPS 204, Algorithm 33 (`ExpandS`). 
+template +void vector_expand_short(vector *s1, vector *s2, + const uint8_t sigma[kSigmaBytes]) { + static_assert(K <= 0x100, "K must fit in 8 bits"); + static_assert(L <= 0x100, "L must fit in 8 bits"); + static_assert(K + L <= 0x100, "K+L must fit in 8 bits"); + + uint8_t derived_seed[kSigmaBytes + 2]; + OPENSSL_memcpy(derived_seed, sigma, kSigmaBytes); + derived_seed[kSigmaBytes] = 0; + derived_seed[kSigmaBytes + 1] = 0; + for (int i = 0; i < L; i++) { + scalar_uniform()>(&s1->v[i], derived_seed); + ++derived_seed[kSigmaBytes]; + } + for (int i = 0; i < K; i++) { + scalar_uniform()>(&s2->v[i], derived_seed); + ++derived_seed[kSigmaBytes]; + } +} + +// FIPS 204, Algorithm 34 (`ExpandMask`). +template +void vector_expand_mask(vector *out, const uint8_t seed[kRhoPrimeBytes], + size_t kappa) { + assert(kappa + L <= 0x10000); + + uint8_t derived_seed[kRhoPrimeBytes + 2]; + OPENSSL_memcpy(derived_seed, seed, kRhoPrimeBytes); + for (int i = 0; i < L; i++) { + size_t index = kappa + i; + derived_seed[kRhoPrimeBytes] = index & 0xFF; + derived_seed[kRhoPrimeBytes + 1] = (index >> 8) & 0xFF; + scalar_sample_mask(&out->v[i], derived_seed); + } +} + +/* Encoding */ + +// FIPS 204, Algorithm 16 (`SimpleBitPack`). +// +// Encodes an entire vector into 32*K*|bits| bytes. Note that since 256 +// (kDegree) is divisible by 8, the individual vector entries will always fill a +// whole number of bytes, so we do not need to worry about bit packing here. +template +void vector_encode(uint8_t *out, const vector *a, int bits) { + if (bits == 4) { + for (int i = 0; i < K; i++) { + scalar_encode_4(out + i * bits * kDegree / 8, &a->v[i]); + } + } else { + assert(bits == 10); + for (int i = 0; i < K; i++) { + scalar_encode_10(out + i * bits * kDegree / 8, &a->v[i]); + } + } +} + +// FIPS 204, Algorithm 18 (`SimpleBitUnpack`). 
+template +void vector_decode_10(vector *out, const uint8_t *in) { + for (int i = 0; i < K; i++) { + scalar_decode_10(&out->v[i], in + i * 10 * kDegree / 8); + } +} + +// FIPS 204, Algorithm 17 (`BitPack`). +// +// Encodes an entire vector into 32*L*|bits| bytes. Note that since 256 +// (kDegree) is divisible by 8, the individual vector entries will always fill a +// whole number of bytes, so we do not need to worry about bit packing here. +template +void vector_encode_signed(uint8_t *out, const vector *a, int bits, + uint32_t max) { + for (int i = 0; i < X; i++) { + scalar_encode_signed(out + i * bits * kDegree / 8, &a->v[i], bits, max); + } +} + +template +int vector_decode_signed(vector *out, const uint8_t *in, int bits, + uint32_t max) { + for (int i = 0; i < X; i++) { + if (!scalar_decode_signed(&out->v[i], in + i * bits * kDegree / 8, bits, + max)) { + return 0; + } + } + return 1; +} + +// FIPS 204, Algorithm 28 (`w1Encode`). +template +void w1_encode(uint8_t out[128 * K], const vector *w1) { + vector_encode(out, w1, 4); +} + +// FIPS 204, Algorithm 20 (`HintBitPack`). +template +void hint_bit_pack(uint8_t out[omega() + K], const vector *h) { + OPENSSL_memset(out, 0, omega() + K); + int index = 0; + for (int i = 0; i < K; i++) { + for (int j = 0; j < kDegree; j++) { + if (h->v[i].c[j]) { + // h must have at most omega() non-zero coefficients. + BSSL_CHECK(index < omega()); + out[index++] = j; + } + } + out[omega() + i] = index; + } +} + +// FIPS 204, Algorithm 21 (`HintBitUnpack`). 
+template +int hint_bit_unpack(vector *h, const uint8_t in[omega() + K]) { + vector_zero(h); + int index = 0; + for (int i = 0; i < K; i++) { + const int limit = in[omega() + i]; + if (limit < index || limit > omega()) { + return 0; + } + + int last = -1; + while (index < limit) { + int byte = in[index++]; + if (last >= 0 && byte <= last) { + return 0; + } + last = byte; + static_assert(kDegree == 256, + "kDegree must be 256 for this write to be in bounds"); + h->v[i].c[byte] = 1; + } + } + for (; index < omega(); index++) { + if (in[index] != 0) { + return 0; + } + } + return 1; +} + +template +struct public_key { + uint8_t rho[kRhoBytes]; + vector t1; + // Pre-cached value(s). + uint8_t public_key_hash[kTrBytes]; +}; + +template +struct private_key { + uint8_t rho[kRhoBytes]; + uint8_t k[kKBytes]; + uint8_t public_key_hash[kTrBytes]; + vector s1; + vector s2; + vector t0; +}; + +template +struct signature { + uint8_t c_tilde[2 * lambda_bytes()]; + vector z; + vector h; +}; + +// FIPS 204, Algorithm 22 (`pkEncode`). +template +int mldsa_marshal_public_key(CBB *out, const struct public_key *pub) { + if (!CBB_add_bytes(out, pub->rho, sizeof(pub->rho))) { + return 0; + } + + uint8_t *vectork_output; + if (!CBB_add_space(out, &vectork_output, 320 * K)) { + return 0; + } + vector_encode(vectork_output, &pub->t1, 10); + + return 1; +} + +// FIPS 204, Algorithm 23 (`pkDecode`). +template +int mldsa_parse_public_key(struct public_key *pub, CBS *in) { + const CBS orig_in = *in; + + if (!CBS_copy_bytes(in, pub->rho, sizeof(pub->rho))) { + return 0; + } + + CBS t1_bytes; + if (!CBS_get_bytes(in, &t1_bytes, 320 * K) || CBS_len(in) != 0) { + return 0; + } + vector_decode_10(&pub->t1, CBS_data(&t1_bytes)); + + // Compute pre-cached values. + BORINGSSL_keccak(pub->public_key_hash, sizeof(pub->public_key_hash), + CBS_data(&orig_in), CBS_len(&orig_in), boringssl_shake256); + + return 1; +} + +// FIPS 204, Algorithm 24 (`skEncode`). 
+template +int mldsa_marshal_private_key(CBB *out, const struct private_key *priv) { + if (!CBB_add_bytes(out, priv->rho, sizeof(priv->rho)) || + !CBB_add_bytes(out, priv->k, sizeof(priv->k)) || + !CBB_add_bytes(out, priv->public_key_hash, + sizeof(priv->public_key_hash))) { + return 0; + } + + constexpr size_t scalar_bytes = + (kDegree * plus_minus_eta_bitlen() + 7) / 8; + uint8_t *vectorl_output; + if (!CBB_add_space(out, &vectorl_output, scalar_bytes * L)) { + return 0; + } + vector_encode_signed(vectorl_output, &priv->s1, plus_minus_eta_bitlen(), + eta()); + + uint8_t *s2_output; + if (!CBB_add_space(out, &s2_output, scalar_bytes * K)) { + return 0; + } + vector_encode_signed(s2_output, &priv->s2, plus_minus_eta_bitlen(), + eta()); + + uint8_t *t0_output; + if (!CBB_add_space(out, &t0_output, 416 * K)) { + return 0; + } + vector_encode_signed(t0_output, &priv->t0, 13, 1 << 12); + + return 1; +} + +// FIPS 204, Algorithm 25 (`skDecode`). +template +int mldsa_parse_private_key(struct private_key *priv, CBS *in) { + CBS s1_bytes; + CBS s2_bytes; + CBS t0_bytes; + constexpr size_t scalar_bytes = + (kDegree * plus_minus_eta_bitlen() + 7) / 8; + if (!CBS_copy_bytes(in, priv->rho, sizeof(priv->rho)) || + !CBS_copy_bytes(in, priv->k, sizeof(priv->k)) || + !CBS_copy_bytes(in, priv->public_key_hash, + sizeof(priv->public_key_hash)) || + !CBS_get_bytes(in, &s1_bytes, scalar_bytes * L) || + !vector_decode_signed(&priv->s1, CBS_data(&s1_bytes), + plus_minus_eta_bitlen(), eta()) || + !CBS_get_bytes(in, &s2_bytes, scalar_bytes * K) || + !vector_decode_signed(&priv->s2, CBS_data(&s2_bytes), + plus_minus_eta_bitlen(), eta()) || + !CBS_get_bytes(in, &t0_bytes, 416 * K) || + // Note: Decoding 13 bits into (-2^12, 2^12] cannot fail. + !vector_decode_signed(&priv->t0, CBS_data(&t0_bytes), 13, 1 << 12)) { + return 0; + } + + return 1; +} + +// FIPS 204, Algorithm 26 (`sigEncode`). 
+template +int mldsa_marshal_signature(CBB *out, const struct signature *sign) { + if (!CBB_add_bytes(out, sign->c_tilde, sizeof(sign->c_tilde))) { + return 0; + } + + uint8_t *vectorl_output; + if (!CBB_add_space(out, &vectorl_output, 640 * L)) { + return 0; + } + vector_encode_signed(vectorl_output, &sign->z, 20, 1 << 19); + + uint8_t *hint_output; + if (!CBB_add_space(out, &hint_output, omega() + K)) { + return 0; + } + hint_bit_pack(hint_output, &sign->h); + + return 1; +} + +// FIPS 204, Algorithm 27 (`sigDecode`). +template +int mldsa_parse_signature(struct signature *sign, CBS *in) { + CBS z_bytes; + CBS hint_bytes; + if (!CBS_copy_bytes(in, sign->c_tilde, sizeof(sign->c_tilde)) || + !CBS_get_bytes(in, &z_bytes, 640 * L) || + // Note: Decoding 20 bits into (-2^19, 2^19] cannot fail. + !vector_decode_signed(&sign->z, CBS_data(&z_bytes), 20, 1 << 19) || + !CBS_get_bytes(in, &hint_bytes, omega() + K) || + !hint_bit_unpack(&sign->h, CBS_data(&hint_bytes))) { + return 0; + }; + + return 1; +} + +template +struct DeleterFree { + void operator()(T *ptr) { OPENSSL_free(ptr); } +}; + +// FIPS 204, Algorithm 6 (`ML-DSA.KeyGen_internal`). Returns 1 on success and 0 +// on failure. +template +int mldsa_generate_key_external_entropy( + uint8_t out_encoded_public_key[public_key_bytes()], + struct private_key *priv, + const uint8_t entropy[BCM_MLDSA_SEED_BYTES]) { + // Intermediate values, allocated on the heap to allow use when there is a + // limited amount of stack. + struct values_st { + struct public_key pub; + matrix a_ntt; + vector s1_ntt; + vector t; + }; + std::unique_ptr> values( + reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); + if (values == NULL) { + return 0; + } + + uint8_t augmented_entropy[BCM_MLDSA_SEED_BYTES + 2]; + OPENSSL_memcpy(augmented_entropy, entropy, BCM_MLDSA_SEED_BYTES); + // The k and l parameters are appended to the seed. 
+ augmented_entropy[BCM_MLDSA_SEED_BYTES] = K; + augmented_entropy[BCM_MLDSA_SEED_BYTES + 1] = L; + uint8_t expanded_seed[kRhoBytes + kSigmaBytes + kKBytes]; + BORINGSSL_keccak(expanded_seed, sizeof(expanded_seed), augmented_entropy, + sizeof(augmented_entropy), boringssl_shake256); + const uint8_t *const rho = expanded_seed; + const uint8_t *const sigma = expanded_seed + kRhoBytes; + const uint8_t *const k = expanded_seed + kRhoBytes + kSigmaBytes; + // rho is public. + CONSTTIME_DECLASSIFY(rho, kRhoBytes); + OPENSSL_memcpy(values->pub.rho, rho, sizeof(values->pub.rho)); + OPENSSL_memcpy(priv->rho, rho, sizeof(priv->rho)); + OPENSSL_memcpy(priv->k, k, sizeof(priv->k)); + + matrix_expand(&values->a_ntt, rho); + vector_expand_short(&priv->s1, &priv->s2, sigma); + + OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); + vector_ntt(&values->s1_ntt); + + matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt); + vector_inverse_ntt(&values->t); + vector_add(&values->t, &values->t, &priv->s2); + + vector_power2_round(&values->pub.t1, &priv->t0, &values->t); + // t1 is public. + CONSTTIME_DECLASSIFY(&values->pub.t1, sizeof(values->pub.t1)); + + CBB cbb; + CBB_init_fixed(&cbb, out_encoded_public_key, public_key_bytes()); + if (!mldsa_marshal_public_key(&cbb, &values->pub)) { + return 0; + } + assert(CBB_len(&cbb) == public_key_bytes()); + + BORINGSSL_keccak(priv->public_key_hash, sizeof(priv->public_key_hash), + out_encoded_public_key, public_key_bytes(), + boringssl_shake256); + + return 1; +} + +template +int mldsa_public_from_private(struct public_key *pub, + const struct private_key *priv) { + // Intermediate values, allocated on the heap to allow use when there is a + // limited amount of stack. 
+ struct values_st { + matrix a_ntt; + vector s1_ntt; + vector t; + vector t0; + }; + std::unique_ptr> values( + reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); + if (values == NULL) { + return 0; + } + + + OPENSSL_memcpy(pub->rho, priv->rho, sizeof(pub->rho)); + OPENSSL_memcpy(pub->public_key_hash, priv->public_key_hash, + sizeof(pub->public_key_hash)); + + matrix_expand(&values->a_ntt, priv->rho); + + OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); + vector_ntt(&values->s1_ntt); + + matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt); + vector_inverse_ntt(&values->t); + vector_add(&values->t, &values->t, &priv->s2); + + vector_power2_round(&pub->t1, &values->t0, &values->t); + return 1; +} + +// FIPS 204, Algorithm 7 (`ML-DSA.Sign_internal`). Returns 1 on success and 0 +// on failure. +template +int mldsa_sign_internal( + uint8_t out_encoded_signature[signature_bytes()], + const struct private_key *priv, const uint8_t *msg, size_t msg_len, + const uint8_t *context_prefix, size_t context_prefix_len, + const uint8_t *context, size_t context_len, + const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]) { + uint8_t mu[kMuBytes]; + struct BORINGSSL_keccak_st keccak_ctx; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, priv->public_key_hash, + sizeof(priv->public_key_hash)); + BORINGSSL_keccak_absorb(&keccak_ctx, context_prefix, context_prefix_len); + BORINGSSL_keccak_absorb(&keccak_ctx, context, context_len); + BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len); + BORINGSSL_keccak_squeeze(&keccak_ctx, mu, kMuBytes); + + uint8_t rho_prime[kRhoPrimeBytes]; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, priv->k, sizeof(priv->k)); + BORINGSSL_keccak_absorb(&keccak_ctx, randomizer, + BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES); + BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes); + BORINGSSL_keccak_squeeze(&keccak_ctx, rho_prime, 
kRhoPrimeBytes); + + // Intermediate values, allocated on the heap to allow use when there is a + // limited amount of stack. + struct values_st { + struct signature sign; + vector s1_ntt; + vector s2_ntt; + vector t0_ntt; + matrix a_ntt; + vector y; + vector w; + vector w1; + vector cs1; + vector cs2; + }; + std::unique_ptr> values( + reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); + if (values == NULL) { + return 0; + } + OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); + vector_ntt(&values->s1_ntt); + + OPENSSL_memcpy(&values->s2_ntt, &priv->s2, sizeof(values->s2_ntt)); + vector_ntt(&values->s2_ntt); + + OPENSSL_memcpy(&values->t0_ntt, &priv->t0, sizeof(values->t0_ntt)); + vector_ntt(&values->t0_ntt); + + matrix_expand(&values->a_ntt, priv->rho); + + // kappa must not exceed 2**16/L = 13107. But the probability of it + // exceeding even 1000 iterations is vanishingly small. + for (size_t kappa = 0;; kappa += L) { + vector_expand_mask(&values->y, rho_prime, kappa); + + vector *y_ntt = &values->cs1; + OPENSSL_memcpy(y_ntt, &values->y, sizeof(*y_ntt)); + vector_ntt(y_ntt); + + matrix_mult(&values->w, &values->a_ntt, y_ntt); + vector_inverse_ntt(&values->w); + + vector_high_bits(&values->w1, &values->w); + uint8_t w1_encoded[128 * K]; + w1_encode(w1_encoded, &values->w1); + + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes); + BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K); + BORINGSSL_keccak_squeeze(&keccak_ctx, values->sign.c_tilde, + 2 * lambda_bytes()); + + scalar c_ntt; + scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde, + sizeof(values->sign.c_tilde), tau()); + scalar_ntt(&c_ntt); + + vector_mult_scalar(&values->cs1, &values->s1_ntt, &c_ntt); + vector_inverse_ntt(&values->cs1); + vector_mult_scalar(&values->cs2, &values->s2_ntt, &c_ntt); + vector_inverse_ntt(&values->cs2); + + vector_add(&values->sign.z, &values->y, &values->cs1); + + vector *r0 = 
&values->w1; + vector_sub(r0, &values->w, &values->cs2); + vector_low_bits(r0, r0); + + // Leaking the fact that a signature was rejected is fine as the next + // attempt at a signature will be (indistinguishable from) independent of + // this one. Note, however, that we additionally leak which of the two + // branches rejected the signature. Section 5.5 of + // https://pq-crystals.org/dilithium/data/dilithium-specification-round3.pdf + // describes this leak as OK. Note we leak less than what is described by + // the paper; we do not reveal which coefficient violated the bound, and + // we hide which of the |z_max| or |r0_max| bound failed. See also + // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/2bbab0fa_d241d35a/ + uint32_t z_max = vector_max(&values->sign.z); + uint32_t r0_max = vector_max_signed(r0); + if (constant_time_declassify_w( + constant_time_ge_w(z_max, gamma1() - beta()) | + constant_time_ge_w(r0_max, kGamma2 - beta()))) { + continue; + } + + vector *ct0 = &values->w1; + vector_mult_scalar(ct0, &values->t0_ntt, &c_ntt); + vector_inverse_ntt(ct0); + vector_make_hint(&values->sign.h, ct0, &values->cs2, &values->w); + + // See above. + uint32_t ct0_max = vector_max(ct0); + size_t h_ones = vector_count_ones(&values->sign.h); + if (constant_time_declassify_w(constant_time_ge_w(ct0_max, kGamma2) | + constant_time_lt_w(omega(), h_ones))) { + continue; + } + + // Although computed with the private key, the signature is public. + CONSTTIME_DECLASSIFY(values->sign.c_tilde, sizeof(values->sign.c_tilde)); + CONSTTIME_DECLASSIFY(&values->sign.z, sizeof(values->sign.z)); + CONSTTIME_DECLASSIFY(&values->sign.h, sizeof(values->sign.h)); + + CBB cbb; + CBB_init_fixed(&cbb, out_encoded_signature, signature_bytes()); + if (!mldsa_marshal_signature(&cbb, &values->sign)) { + return 0; + } + + BSSL_CHECK(CBB_len(&cbb) == signature_bytes()); + return 1; + } +} + +// FIPS 204, Algorithm 8 (`ML-DSA.Verify_internal`). 
+template +int mldsa_verify_internal(const struct public_key *pub, + const uint8_t encoded_signature[signature_bytes()], + const uint8_t *msg, size_t msg_len, + const uint8_t *context_prefix, + size_t context_prefix_len, const uint8_t *context, + size_t context_len) { + // Intermediate values, allocated on the heap to allow use when there is a + // limited amount of stack. + struct values_st { + struct signature sign; + matrix a_ntt; + vector z_ntt; + vector az_ntt; + vector ct1_ntt; + }; + std::unique_ptr> values( + reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); + if (values == NULL) { + return 0; + } + + CBS cbs; + CBS_init(&cbs, encoded_signature, signature_bytes()); + if (!mldsa_parse_signature(&values->sign, &cbs)) { + return 0; + } + + matrix_expand(&values->a_ntt, pub->rho); + + uint8_t mu[kMuBytes]; + struct BORINGSSL_keccak_st keccak_ctx; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, pub->public_key_hash, + sizeof(pub->public_key_hash)); + BORINGSSL_keccak_absorb(&keccak_ctx, context_prefix, context_prefix_len); + BORINGSSL_keccak_absorb(&keccak_ctx, context, context_len); + BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len); + BORINGSSL_keccak_squeeze(&keccak_ctx, mu, kMuBytes); + + scalar c_ntt; + scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde, + sizeof(values->sign.c_tilde), tau()); + scalar_ntt(&c_ntt); + + OPENSSL_memcpy(&values->z_ntt, &values->sign.z, sizeof(values->z_ntt)); + vector_ntt(&values->z_ntt); + + matrix_mult(&values->az_ntt, &values->a_ntt, &values->z_ntt); + + vector_scale_power2_round(&values->ct1_ntt, &pub->t1); + vector_ntt(&values->ct1_ntt); + + vector_mult_scalar(&values->ct1_ntt, &values->ct1_ntt, &c_ntt); + + vector *const w1 = &values->az_ntt; + vector_sub(w1, &values->az_ntt, &values->ct1_ntt); + vector_inverse_ntt(w1); + + vector_use_hint_vartime(w1, &values->sign.h, w1); + uint8_t w1_encoded[128 * K]; + w1_encode(w1_encoded, w1); + + uint8_t c_tilde[2 * 
lambda_bytes()]; + BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); + BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes); + BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K); + BORINGSSL_keccak_squeeze(&keccak_ctx, c_tilde, 2 * lambda_bytes()); + + uint32_t z_max = vector_max(&values->sign.z); + return z_max < static_cast(gamma1() - beta()) && + OPENSSL_memcmp(c_tilde, values->sign.c_tilde, 2 * lambda_bytes()) == + 0; +} + +struct private_key<6, 5> *private_key_from_external_65( + const struct BCM_mldsa65_private_key *external) { + static_assert(sizeof(struct BCM_mldsa65_private_key) == + sizeof(struct private_key<6, 5>), + "MLDSA65 private key size incorrect"); + static_assert(alignof(struct BCM_mldsa65_private_key) == + alignof(struct private_key<6, 5>), + "MLDSA65 private key align incorrect"); + return (struct private_key<6, 5> *)external; +} + +struct public_key<6> * +public_key_from_external_65(const struct BCM_mldsa65_public_key *external) { + static_assert(sizeof(struct BCM_mldsa65_public_key) == + sizeof(struct public_key<6>), + "MLDSA65 public key size incorrect"); + static_assert(alignof(struct BCM_mldsa65_public_key) == + alignof(struct public_key<6>), + "MLDSA65 public key align incorrect"); + return (struct public_key<6> *)external; +} + +struct private_key<8, 7> * +private_key_from_external_87(const struct BCM_mldsa87_private_key *external) { + static_assert(sizeof(struct BCM_mldsa87_private_key) == + sizeof(struct private_key<8, 7>), + "MLDSA87 private key size incorrect"); + static_assert(alignof(struct BCM_mldsa87_private_key) == + alignof(struct private_key<8, 7>), + "MLDSA87 private key align incorrect"); + return (struct private_key<8, 7> *)external; +} + +struct public_key<8> * +public_key_from_external_87(const struct BCM_mldsa87_public_key *external) { + static_assert(sizeof(struct BCM_mldsa87_public_key) == + sizeof(struct public_key<8>), + "MLDSA87 public key size incorrect"); + static_assert(alignof(struct 
BCM_mldsa87_public_key) == + alignof(struct public_key<8>), + "MLDSA87 public key align incorrect"); + return (struct public_key<8> *)external; +} + +} // namespace +} // namespace mldsa + + +// ML-DSA-65 specific wrappers. + +bcm_status BCM_mldsa65_parse_public_key( + struct BCM_mldsa65_public_key *public_key, CBS *in) { + return bcm_as_approved_status(mldsa_parse_public_key( + mldsa::public_key_from_external_65(public_key), in)); +} + +bcm_status BCM_mldsa65_marshal_private_key( + CBB *out, const struct BCM_mldsa65_private_key *private_key) { + return bcm_as_approved_status(mldsa_marshal_private_key( + out, mldsa::private_key_from_external_65(private_key))); +} + +bcm_status BCM_mldsa65_parse_private_key( + struct BCM_mldsa65_private_key *private_key, CBS *in) { + return bcm_as_approved_status( + mldsa_parse_private_key(mldsa::private_key_from_external_65(private_key), + in) && + CBS_len(in) == 0); +} + +// Calls |MLDSA_generate_key_external_entropy| with random bytes from +// |BCM_rand_bytes|. 
+bcm_status BCM_mldsa65_generate_key( + uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES], + uint8_t out_seed[BCM_MLDSA_SEED_BYTES], + struct BCM_mldsa65_private_key *out_private_key) { + BCM_rand_bytes(out_seed, BCM_MLDSA_SEED_BYTES); + return BCM_mldsa65_generate_key_external_entropy(out_encoded_public_key, + out_private_key, out_seed); +} + +bcm_status BCM_mldsa65_private_key_from_seed( + struct BCM_mldsa65_private_key *out_private_key, + const uint8_t seed[BCM_MLDSA_SEED_BYTES]) { + uint8_t public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES]; + return BCM_mldsa65_generate_key_external_entropy(public_key, out_private_key, + seed); +} + +bcm_status BCM_mldsa65_generate_key_external_entropy( + uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES], + struct BCM_mldsa65_private_key *out_private_key, + const uint8_t entropy[BCM_MLDSA_SEED_BYTES]) { + return bcm_as_approved_status(mldsa_generate_key_external_entropy( + out_encoded_public_key, + mldsa::private_key_from_external_65(out_private_key), entropy)); +} + +bcm_status BCM_mldsa65_public_from_private( + struct BCM_mldsa65_public_key *out_public_key, + const struct BCM_mldsa65_private_key *private_key) { + return bcm_as_approved_status(mldsa_public_from_private( + mldsa::public_key_from_external_65(out_public_key), + mldsa::private_key_from_external_65(private_key))); +} + +bcm_status BCM_mldsa65_sign_internal( + uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES], + const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, + const uint8_t *context, size_t context_len, + const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]) { + return bcm_as_approved_status(mldsa_sign_internal( + out_encoded_signature, mldsa::private_key_from_external_65(private_key), + msg, msg_len, context_prefix, context_prefix_len, context, context_len, + randomizer)); +} + +// ML-DSA signature in randomized mode, filling the 
random bytes with +// |BCM_rand_bytes|. +bcm_status BCM_mldsa65_sign( + uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES], + const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context, size_t context_len) { + BSSL_CHECK(context_len <= 255); + uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]; + BCM_rand_bytes(randomizer, sizeof(randomizer)); + + const uint8_t context_prefix[2] = {0, static_cast(context_len)}; + return BCM_mldsa65_sign_internal( + out_encoded_signature, private_key, msg, msg_len, context_prefix, + sizeof(context_prefix), context, context_len, randomizer); +} + +// FIPS 204, Algorithm 3 (`ML-DSA.Verify`). +bcm_status BCM_mldsa65_verify( + const struct BCM_mldsa65_public_key *public_key, + const uint8_t signature[BCM_MLDSA65_SIGNATURE_BYTES], const uint8_t *msg, + size_t msg_len, const uint8_t *context, size_t context_len) { + BSSL_CHECK(context_len <= 255); + const uint8_t context_prefix[2] = {0, static_cast(context_len)}; + return BCM_mldsa65_verify_internal(public_key, signature, msg, msg_len, + context_prefix, sizeof(context_prefix), + context, context_len); +} + +bcm_status BCM_mldsa65_verify_internal( + const struct BCM_mldsa65_public_key *public_key, + const uint8_t encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES], + const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, + size_t context_prefix_len, const uint8_t *context, size_t context_len) { + return bcm_as_approved_status(mldsa::mldsa_verify_internal<6, 5>( + mldsa::public_key_from_external_65(public_key), encoded_signature, msg, + msg_len, context_prefix, context_prefix_len, context, context_len)); +} + +bcm_status BCM_mldsa65_marshal_public_key( + CBB *out, const struct BCM_mldsa65_public_key *public_key) { + return bcm_as_approved_status(mldsa_marshal_public_key( + out, mldsa::public_key_from_external_65(public_key))); +} + + +// ML-DSA-87 specific wrappers. 
+ +bcm_status BCM_mldsa87_parse_public_key( + struct BCM_mldsa87_public_key *public_key, CBS *in) { + return bcm_as_approved_status(mldsa_parse_public_key( + mldsa::public_key_from_external_87(public_key), in)); +} + +bcm_status BCM_mldsa87_marshal_private_key( + CBB *out, const struct BCM_mldsa87_private_key *private_key) { + return bcm_as_approved_status(mldsa_marshal_private_key( + out, mldsa::private_key_from_external_87(private_key))); +} + +bcm_status BCM_mldsa87_parse_private_key( + struct BCM_mldsa87_private_key *private_key, CBS *in) { + return bcm_as_approved_status( + mldsa_parse_private_key(mldsa::private_key_from_external_87(private_key), + in) && + CBS_len(in) == 0); +} + +// Calls |MLDSA_generate_key_external_entropy| with random bytes from +// |BCM_rand_bytes|. +bcm_status BCM_mldsa87_generate_key( + uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES], + uint8_t out_seed[BCM_MLDSA_SEED_BYTES], + struct BCM_mldsa87_private_key *out_private_key) { + BCM_rand_bytes(out_seed, BCM_MLDSA_SEED_BYTES); + return BCM_mldsa87_generate_key_external_entropy(out_encoded_public_key, + out_private_key, out_seed); +} + +bcm_status BCM_mldsa87_private_key_from_seed( + struct BCM_mldsa87_private_key *out_private_key, + const uint8_t seed[BCM_MLDSA_SEED_BYTES]) { + uint8_t public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES]; + return BCM_mldsa87_generate_key_external_entropy(public_key, out_private_key, + seed); +} + +bcm_status BCM_mldsa87_generate_key_external_entropy( + uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES], + struct BCM_mldsa87_private_key *out_private_key, + const uint8_t entropy[BCM_MLDSA_SEED_BYTES]) { + return bcm_as_approved_status(mldsa_generate_key_external_entropy( + out_encoded_public_key, + mldsa::private_key_from_external_87(out_private_key), entropy)); +} + +bcm_status BCM_mldsa87_public_from_private( + struct BCM_mldsa87_public_key *out_public_key, + const struct BCM_mldsa87_private_key *private_key) { + return 
bcm_as_approved_status(mldsa_public_from_private( + mldsa::public_key_from_external_87(out_public_key), + mldsa::private_key_from_external_87(private_key))); +} + +bcm_status BCM_mldsa87_sign_internal( + uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], + const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, + const uint8_t *context, size_t context_len, + const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]) { + return bcm_as_approved_status(mldsa_sign_internal( + out_encoded_signature, mldsa::private_key_from_external_87(private_key), + msg, msg_len, context_prefix, context_prefix_len, context, context_len, + randomizer)); +} + +// ML-DSA signature in randomized mode, filling the random bytes with +// |BCM_rand_bytes|. +bcm_status BCM_mldsa87_sign( + uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], + const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg, + size_t msg_len, const uint8_t *context, size_t context_len) { + BSSL_CHECK(context_len <= 255); + uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]; + BCM_rand_bytes(randomizer, sizeof(randomizer)); + + const uint8_t context_prefix[2] = {0, static_cast(context_len)}; + return BCM_mldsa87_sign_internal( + out_encoded_signature, private_key, msg, msg_len, context_prefix, + sizeof(context_prefix), context, context_len, randomizer); +} + +// FIPS 204, Algorithm 3 (`ML-DSA.Verify`). 
+bcm_status BCM_mldsa87_verify(const struct BCM_mldsa87_public_key *public_key, + const uint8_t *signature, const uint8_t *msg, + size_t msg_len, const uint8_t *context, + size_t context_len) { + BSSL_CHECK(context_len <= 255); + const uint8_t context_prefix[2] = {0, static_cast(context_len)}; + return BCM_mldsa87_verify_internal(public_key, signature, msg, msg_len, + context_prefix, sizeof(context_prefix), + context, context_len); +} + +bcm_status BCM_mldsa87_verify_internal( + const struct BCM_mldsa87_public_key *public_key, + const uint8_t encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], + const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, + size_t context_prefix_len, const uint8_t *context, size_t context_len) { + return bcm_as_approved_status(mldsa::mldsa_verify_internal<8, 7>( + mldsa::public_key_from_external_87(public_key), encoded_signature, msg, + msg_len, context_prefix, context_prefix_len, context, context_len)); +} + +bcm_status BCM_mldsa87_marshal_public_key( + CBB *out, const struct BCM_mldsa87_public_key *public_key) { + return bcm_as_approved_status(mldsa_marshal_public_key( + out, mldsa::public_key_from_external_87(public_key))); +} diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm.cc.inc index 98862544..e6e3fa1f 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm.cc.inc @@ -135,14 +135,42 @@ void gcm_init_ssse3(u128 Htable[16], const uint64_t H[2]) { #if defined(HW_GCM) && defined(OPENSSL_X86_64) static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], - uint8_t Xi[16], const u128 Htable[16]) { - return aesni_gcm_encrypt(in, out, len, key, ivec, Htable, Xi); + uint8_t Xi[16], const u128 Htable[16], + enum gcm_impl_t impl) { + switch (impl) { + case gcm_x86_vaes_avx10_256: + len &= kSizeTWithoutLower4Bits; + 
aes_gcm_enc_update_vaes_avx10_256(in, out, len, key, ivec, Htable, Xi); + CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); + return len; + case gcm_x86_vaes_avx10_512: + len &= kSizeTWithoutLower4Bits; + aes_gcm_enc_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi); + CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); + return len; + default: + return aesni_gcm_encrypt(in, out, len, key, ivec, Htable, Xi); + } } static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], - uint8_t Xi[16], const u128 Htable[16]) { - return aesni_gcm_decrypt(in, out, len, key, ivec, Htable, Xi); + uint8_t Xi[16], const u128 Htable[16], + enum gcm_impl_t impl) { + switch (impl) { + case gcm_x86_vaes_avx10_256: + len &= kSizeTWithoutLower4Bits; + aes_gcm_dec_update_vaes_avx10_256(in, out, len, key, ivec, Htable, Xi); + CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); + return len; + case gcm_x86_vaes_avx10_512: + len &= kSizeTWithoutLower4Bits; + aes_gcm_dec_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi); + CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); + return len; + default: + return aesni_gcm_decrypt(in, out, len, key, ivec, Htable, Xi); + } } #endif // HW_GCM && X86_64 @@ -150,7 +178,8 @@ static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], - uint8_t Xi[16], const u128 Htable[16]) { + uint8_t Xi[16], const u128 Htable[16], + enum gcm_impl_t impl) { const size_t len_blocks = len & kSizeTWithoutLower4Bits; if (!len_blocks) { return 0; @@ -161,7 +190,8 @@ static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], - uint8_t Xi[16], const u128 Htable[16]) { + 
uint8_t Xi[16], const u128 Htable[16], + enum gcm_impl_t impl) { const size_t len_blocks = len & kSizeTWithoutLower4Bits; if (!len_blocks) { return 0; @@ -173,21 +203,28 @@ static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, #endif // HW_GCM && AARCH64 void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, - u128 out_table[16], int *out_is_avx, - const uint8_t gcm_key[16]) { - *out_is_avx = 0; - + u128 out_table[16], const uint8_t gcm_key[16]) { // H is passed to |gcm_init_*| as a pair of byte-swapped, 64-bit values. uint64_t H[2] = {CRYPTO_load_u64_be(gcm_key), CRYPTO_load_u64_be(gcm_key + 8)}; #if defined(GHASH_ASM_X86_64) if (crypto_gcm_clmul_enabled()) { + if (CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() && + CRYPTO_is_VPCLMULQDQ_capable() && CRYPTO_is_BMI2_capable()) { + gcm_init_vpclmulqdq_avx10(out_table, H); + *out_mult = gcm_gmult_vpclmulqdq_avx10; + if (CRYPTO_cpu_avoid_zmm_registers()) { + *out_hash = gcm_ghash_vpclmulqdq_avx10_256; + } else { + *out_hash = gcm_ghash_vpclmulqdq_avx10_512; + } + return; + } if (CRYPTO_is_AVX_capable() && CRYPTO_is_MOVBE_capable()) { gcm_init_avx(out_table, H); *out_mult = gcm_gmult_avx; *out_hash = gcm_ghash_avx; - *out_is_avx = 1; return; } gcm_init_clmul(out_table, H); @@ -244,14 +281,25 @@ void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key, const AES_KEY *aes_key, OPENSSL_memset(ghash_key, 0, sizeof(ghash_key)); (*block)(ghash_key, ghash_key, aes_key); - int is_avx; - CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, gcm_key->Htable, &is_avx, + CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, gcm_key->Htable, ghash_key); -#if defined(OPENSSL_AARCH64) && !defined(OPENSSL_NO_ASM) - gcm_key->use_hw_gcm_crypt = (gcm_pmull_capable() && block_is_hwaes) ? 1 : 0; -#else - gcm_key->use_hw_gcm_crypt = (is_avx && block_is_hwaes) ? 
1 : 0; +#if !defined(OPENSSL_NO_ASM) +#if defined(OPENSSL_X86_64) + if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx10_256 && + CRYPTO_is_VAES_capable()) { + gcm_key->impl = gcm_x86_vaes_avx10_256; + } else if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx10_512 && + CRYPTO_is_VAES_capable()) { + gcm_key->impl = gcm_x86_vaes_avx10_512; + } else if (gcm_key->ghash == gcm_ghash_avx && block_is_hwaes) { + gcm_key->impl = gcm_x86_aesni; + } +#elif defined(OPENSSL_AARCH64) + if (gcm_pmull_capable() && block_is_hwaes) { + gcm_key->impl = gcm_arm64_aes; + } +#endif #endif } @@ -565,11 +613,11 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const AES_KEY *key, #if defined(HW_GCM) // Check |len| to work around a C language bug. See https://crbug.com/1019588. - if (ctx->gcm_key.use_hw_gcm_crypt && len > 0) { + if (ctx->gcm_key.impl != gcm_separate && len > 0) { // |hw_gcm_encrypt| may not process all the input given to it. It may // not process *any* of its input if it is deemed too small. size_t bulk = hw_gcm_encrypt(in, out, len, key, ctx->Yi, ctx->Xi, - ctx->gcm_key.Htable); + ctx->gcm_key.Htable, ctx->gcm_key.impl); in += bulk; out += bulk; len -= bulk; @@ -654,11 +702,11 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const AES_KEY *key, #if defined(HW_GCM) // Check |len| to work around a C language bug. See https://crbug.com/1019588. - if (ctx->gcm_key.use_hw_gcm_crypt && len > 0) { + if (ctx->gcm_key.impl != gcm_separate && len > 0) { // |hw_gcm_decrypt| may not process all the input given to it. It may // not process *any* of its input if it is deemed too small. 
size_t bulk = hw_gcm_decrypt(in, out, len, key, ctx->Yi, ctx->Xi, - ctx->gcm_key.Htable); + ctx->gcm_key.Htable, ctx->gcm_key.impl); in += bulk; out += bulk; len -= bulk; diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm_nohw.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm_nohw.cc.inc index 4859cbd0..486d1164 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm_nohw.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/gcm_nohw.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/internal.h index cd2079e1..f650e762 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/internal.h @@ -126,6 +126,15 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, // can be safely copied. Additionally, |gcm_key| is split into a separate // struct. +// gcm_impl_t specifies an assembly implementation of AES-GCM. +enum gcm_impl_t { + gcm_separate = 0, // No combined AES-GCM, but may have AES-CTR and GHASH. + gcm_x86_aesni, + gcm_x86_vaes_avx10_256, + gcm_x86_vaes_avx10_512, + gcm_arm64_aes, +}; + typedef struct { uint64_t hi,lo; } u128; // gmult_func multiplies |Xi| by the GCM key and writes the result back to @@ -148,10 +157,7 @@ typedef struct gcm128_key_st { ghash_func ghash; block128_f block; - - // use_hw_gcm_crypt is true if this context should use platform-specific - // assembly to process GCM data. - unsigned use_hw_gcm_crypt:1; + enum gcm_impl_t impl; } GCM128_KEY; // GCM128_CONTEXT contains state for a single GCM operation. 
The structure @@ -182,72 +188,62 @@ int crypto_gcm_clmul_enabled(void); // CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to // |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware -// accelerated) functions for performing operations in the GHASH field. If the -// AVX implementation was used |*out_is_avx| will be true. +// accelerated) functions for performing operations in the GHASH field. void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, - u128 out_table[16], int *out_is_avx, - const uint8_t gcm_key[16]); + u128 out_table[16], const uint8_t gcm_key[16]); // CRYPTO_gcm128_init_key initialises |gcm_key| to use |block| (typically AES) // with the given key. |block_is_hwaes| is one if |block| is |aes_hw_encrypt|. -OPENSSL_EXPORT void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key, - const AES_KEY *key, block128_f block, - int block_is_hwaes); +void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key, const AES_KEY *key, + block128_f block, int block_is_hwaes); // CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the // same key that was passed to |CRYPTO_gcm128_init|. -OPENSSL_EXPORT void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const AES_KEY *key, - const uint8_t *iv, size_t iv_len); +void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const AES_KEY *key, + const uint8_t *iv, size_t iv_len); // CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM. // This must be called before and data is encrypted. It returns one on success // and zero otherwise. -OPENSSL_EXPORT int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, - size_t len); +int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len); // CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key| // must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one // on success and zero otherwise. 
-OPENSSL_EXPORT int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, - const AES_KEY *key, const uint8_t *in, - uint8_t *out, size_t len); +int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key, + const uint8_t *in, uint8_t *out, size_t len); // CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key| // must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one // on success and zero otherwise. -OPENSSL_EXPORT int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, - const AES_KEY *key, const uint8_t *in, - uint8_t *out, size_t len); +int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key, + const uint8_t *in, uint8_t *out, size_t len); // CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using // a CTR function that only handles the bottom 32 bits of the nonce, like // |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was // passed to |CRYPTO_gcm128_init|. It returns one on success and zero // otherwise. -OPENSSL_EXPORT int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, - const AES_KEY *key, - const uint8_t *in, uint8_t *out, - size_t len, ctr128_f stream); +int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const AES_KEY *key, + const uint8_t *in, uint8_t *out, size_t len, + ctr128_f stream); // CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using // a CTR function that only handles the bottom 32 bits of the nonce, like // |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was // passed to |CRYPTO_gcm128_init|. It returns one on success and zero // otherwise. 
-OPENSSL_EXPORT int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, - const AES_KEY *key, - const uint8_t *in, uint8_t *out, - size_t len, ctr128_f stream); +int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const AES_KEY *key, + const uint8_t *in, uint8_t *out, size_t len, + ctr128_f stream); // CRYPTO_gcm128_finish calculates the authenticator and compares it against // |len| bytes of |tag|. It returns one on success and zero otherwise. -OPENSSL_EXPORT int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, - size_t len); +int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len); // CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|. // The minimum of |len| and 16 bytes are copied into |tag|. -OPENSSL_EXPORT void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag, - size_t len); +void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag, size_t len); // GCM assembly. @@ -287,6 +283,30 @@ size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], const u128 Htable[16], uint8_t Xi[16]); + +void gcm_init_vpclmulqdq_avx10(u128 Htable[16], const uint64_t H[2]); +void gcm_gmult_vpclmulqdq_avx10(uint8_t Xi[16], const u128 Htable[16]); +void gcm_ghash_vpclmulqdq_avx10_256(uint8_t Xi[16], const u128 Htable[16], + const uint8_t *in, size_t len); +void gcm_ghash_vpclmulqdq_avx10_512(uint8_t Xi[16], const u128 Htable[16], + const uint8_t *in, size_t len); +void aes_gcm_enc_update_vaes_avx10_256(const uint8_t *in, uint8_t *out, + size_t len, const AES_KEY *key, + const uint8_t ivec[16], + const u128 Htable[16], uint8_t Xi[16]); +void aes_gcm_dec_update_vaes_avx10_256(const uint8_t *in, uint8_t *out, + size_t len, const AES_KEY *key, + const uint8_t ivec[16], + const u128 Htable[16], uint8_t Xi[16]); +void aes_gcm_enc_update_vaes_avx10_512(const uint8_t *in, uint8_t *out, + size_t len, const AES_KEY *key, 
+ const uint8_t ivec[16], + const u128 Htable[16], uint8_t Xi[16]); +void aes_gcm_dec_update_vaes_avx10_512(const uint8_t *in, uint8_t *out, + size_t len, const AES_KEY *key, + const uint8_t ivec[16], + const u128 Htable[16], uint8_t Xi[16]); + #endif // OPENSSL_X86_64 #if defined(OPENSSL_X86) diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/polyval.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/polyval.cc.inc index 88cb52f0..986260f5 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/polyval.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/modes/polyval.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -56,8 +56,7 @@ void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]) { OPENSSL_memcpy(H, key, 16); reverse_and_mulX_ghash(H); - int is_avx; - CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, ctx->Htable, &is_avx, H); + CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, ctx->Htable, H); OPENSSL_memset(&ctx->S, 0, sizeof(ctx->S)); } diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/ctrdrbg.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/ctrdrbg.cc.inc index 001993de..9bbc99f9 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/ctrdrbg.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/ctrdrbg.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. 
+/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/internal.h index b661d123..c9be714b 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/rand.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/rand.cc.inc index e8a42198..665364fc 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/rand.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rand/rand.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -169,12 +169,12 @@ static int rdrand(uint8_t *buf, size_t len) { return 0; } bcm_status BCM_rand_bytes_hwrng(uint8_t *buf, const size_t len) { if (!have_rdrand()) { - return bcm_status_failure; + return bcm_status::failure; } if (rdrand(buf, len)) { - return bcm_status_not_approved; + return bcm_status::not_approved; } - return bcm_status_failure; + return bcm_status::failure; } #if defined(BORINGSSL_FIPS) @@ -213,7 +213,7 @@ bcm_infallible BCM_rand_load_entropy(const uint8_t *entropy, size_t entropy_len, buffer->bytes_valid += entropy_len; buffer->want_additional_input |= want_additional_input && (entropy_len != 0); CRYPTO_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); - return bcm_infallible_not_approved; + return bcm_infallible::not_approved; } // get_seed_entropy fills |out_entropy_len| bytes of |out_entropy| from the @@ -330,7 +330,7 @@ static void rand_get_seed(struct rand_thread_state *state, bcm_infallible BCM_rand_bytes_with_additional_data( uint8_t *out, size_t out_len, const uint8_t user_additional_data[32]) { if (out_len == 0) { - return bcm_infallible_approved; + return bcm_infallible::approved; } const uint64_t fork_generation = CRYPTO_get_fork_generation(); @@ -471,11 +471,11 @@ bcm_infallible BCM_rand_bytes_with_additional_data( #if defined(BORINGSSL_FIPS) CRYPTO_MUTEX_unlock_read(&state->clear_drbg_lock); #endif - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_rand_bytes(uint8_t *out, size_t out_len) { static const uint8_t kZeroAdditionalData[32] = {0}; BCM_rand_bytes_with_additional_data(out, out_len, kZeroAdditionalData); - return bcm_infallible_approved; + return bcm_infallible::approved; } diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rsa/padding.cc.inc 
b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rsa/padding.cc.inc index ce9b2862..bc32f584 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/rsa/padding.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/rsa/padding.cc.inc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2005. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2005. */ /* ==================================================================== * Copyright (c) 2005 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/fips.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/fips.cc.inc index 382dfad0..976608b8 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/fips.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/fips.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/self_check.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/self_check.cc.inc index 2b5a772d..344d319f 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/self_check.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/self_check/self_check.cc.inc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. 
+/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/service_indicator/service_indicator.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/service_indicator/service_indicator.cc.inc index 77f72204..36a5f9fb 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/service_indicator/service_indicator.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/service_indicator/service_indicator.cc.inc @@ -186,7 +186,7 @@ static int is_md_fips_approved_for_verifying(int md_type) { static void evp_md_ctx_verify_service_indicator(const EVP_MD_CTX *ctx, int (*md_ok)(int md_type)) { - if (EVP_MD_CTX_md(ctx) == NULL) { + if (EVP_MD_CTX_get0_md(ctx) == NULL) { // Signature schemes without a prehash are currently never FIPS approved. return; } diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/internal.h index bfdbc776..c07a3691 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. 
+/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha1.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha1.cc.inc index a15c40b2..725e1a3e 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha1.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha1.cc.inc @@ -72,7 +72,7 @@ bcm_infallible BCM_sha1_init(SHA_CTX *sha) { sha->h[2] = 0x98badcfeUL; sha->h[3] = 0x10325476UL; sha->h[4] = 0xc3d2e1f0UL; - return bcm_infallible_approved; + return bcm_infallible::approved; } #if !defined(SHA1_ASM) @@ -82,14 +82,14 @@ static void sha1_block_data_order(uint32_t state[5], const uint8_t *data, bcm_infallible BCM_sha1_transform(SHA_CTX *c, const uint8_t data[SHA_CBLOCK]) { sha1_block_data_order(c->h, data, 1); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha1_update(SHA_CTX *c, const void *data, size_t len) { crypto_md32_update(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num, &c->Nh, &c->Nl, reinterpret_cast(data), len); - return bcm_infallible_approved; + return bcm_infallible::approved; } static void sha1_output_state(uint8_t out[SHA_DIGEST_LENGTH], @@ -107,7 +107,7 @@ bcm_infallible BCM_sha1_final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *c) { sha1_output_state(out, c); FIPS_service_indicator_update_state(); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_fips_186_2_prf(uint8_t *out, size_t out_len, @@ -144,7 +144,7 @@ bcm_infallible BCM_fips_186_2_prf(uint8_t *out, size_t out_len, out += SHA_DIGEST_LENGTH; out_len -= SHA_DIGEST_LENGTH; } - return bcm_infallible_not_approved; + return bcm_infallible::not_approved; } #define Xupdate(a, ix, ia, ib, ic, id) \ diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha256.cc.inc 
b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha256.cc.inc index bba5276d..4d331a5e 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha256.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha256.cc.inc @@ -76,7 +76,7 @@ bcm_infallible BCM_sha224_init(SHA256_CTX *sha) { sha->h[6] = 0x64f98fa7UL; sha->h[7] = 0xbefa4fa4UL; sha->md_len = BCM_SHA224_DIGEST_LENGTH; - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha256_init(SHA256_CTX *sha) { @@ -90,7 +90,7 @@ bcm_infallible BCM_sha256_init(SHA256_CTX *sha) { sha->h[6] = 0x1f83d9abUL; sha->h[7] = 0x5be0cd19UL; sha->md_len = BCM_SHA256_DIGEST_LENGTH; - return bcm_infallible_approved; + return bcm_infallible::approved; } #if !defined(SHA256_ASM) @@ -101,14 +101,14 @@ static void sha256_block_data_order(uint32_t state[8], const uint8_t *in, bcm_infallible BCM_sha256_transform(SHA256_CTX *c, const uint8_t data[BCM_SHA256_CBLOCK]) { sha256_block_data_order(c->h, data, 1); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha256_update(SHA256_CTX *c, const void *data, size_t len) { crypto_md32_update(&sha256_block_data_order, c->h, c->data, BCM_SHA256_CBLOCK, &c->num, &c->Nh, &c->Nl, reinterpret_cast(data), len); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha224_update(SHA256_CTX *ctx, const void *data, @@ -140,7 +140,7 @@ bcm_infallible BCM_sha256_final(uint8_t out[BCM_SHA256_DIGEST_LENGTH], // // TODO(davidben): Add an assert and fix code to match them up. sha256_final_impl(out, c->md_len, c); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha224_final(uint8_t out[BCM_SHA224_DIGEST_LENGTH], @@ -149,7 +149,7 @@ bcm_infallible BCM_sha224_final(uint8_t out[BCM_SHA224_DIGEST_LENGTH], // to |BCM_SHA224_DIGEST_LENGTH|. 
assert(ctx->md_len == BCM_SHA224_DIGEST_LENGTH); sha256_final_impl(out, BCM_SHA224_DIGEST_LENGTH, ctx); - return bcm_infallible_approved; + return bcm_infallible::approved; } #if !defined(SHA256_ASM) @@ -328,7 +328,7 @@ bcm_infallible BCM_sha256_transform_blocks(uint32_t state[8], const uint8_t *data, size_t num_blocks) { sha256_block_data_order(state, data, num_blocks); - return bcm_infallible_approved; + return bcm_infallible::approved; } #undef Sigma0 diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha512.cc.inc b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha512.cc.inc index 2e2d3ab5..3b4c06f5 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha512.cc.inc +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/sha/sha512.cc.inc @@ -86,7 +86,7 @@ bcm_infallible BCM_sha384_init(SHA512_CTX *sha) { sha->Nh = 0; sha->num = 0; sha->md_len = BCM_SHA384_DIGEST_LENGTH; - return bcm_infallible_approved; + return bcm_infallible::approved; } @@ -104,7 +104,7 @@ bcm_infallible BCM_sha512_init(SHA512_CTX *sha) { sha->Nh = 0; sha->num = 0; sha->md_len = BCM_SHA512_DIGEST_LENGTH; - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha512_256_init(SHA512_CTX *sha) { @@ -121,7 +121,7 @@ bcm_infallible BCM_sha512_256_init(SHA512_CTX *sha) { sha->Nh = 0; sha->num = 0; sha->md_len = BCM_SHA512_256_DIGEST_LENGTH; - return bcm_infallible_approved; + return bcm_infallible::approved; } #if !defined(SHA512_ASM) @@ -136,7 +136,7 @@ bcm_infallible BCM_sha384_final(uint8_t out[BCM_SHA384_DIGEST_LENGTH], // |sha->md_len| to |BCM_SHA384_DIGEST_LENGTH|. 
assert(sha->md_len == BCM_SHA384_DIGEST_LENGTH); sha512_final_impl(out, BCM_SHA384_DIGEST_LENGTH, sha); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha384_update(SHA512_CTX *sha, const void *data, @@ -155,13 +155,13 @@ bcm_infallible BCM_sha512_256_final(uint8_t out[BCM_SHA512_256_DIGEST_LENGTH], // |sha->md_len| to |BCM_SHA512_256_DIGEST_LENGTH|. assert(sha->md_len == BCM_SHA512_256_DIGEST_LENGTH); sha512_final_impl(out, BCM_SHA512_256_DIGEST_LENGTH, sha); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha512_transform(SHA512_CTX *c, const uint8_t block[SHA512_CBLOCK]) { sha512_block_data_order(c->h, block, 1); - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha512_update(SHA512_CTX *c, const void *in_data, @@ -171,7 +171,7 @@ bcm_infallible BCM_sha512_update(SHA512_CTX *c, const void *in_data, const uint8_t *data = reinterpret_cast(in_data); if (len == 0) { - return bcm_infallible_approved; + return bcm_infallible::approved; } l = (c->Nl + (((uint64_t)len) << 3)) & UINT64_C(0xffffffffffffffff); @@ -189,7 +189,7 @@ bcm_infallible BCM_sha512_update(SHA512_CTX *c, const void *in_data, if (len < n) { OPENSSL_memcpy(p + c->num, data, len); c->num += (unsigned int)len; - return bcm_infallible_approved; + return bcm_infallible::approved; } else { OPENSSL_memcpy(p + c->num, data, n), c->num = 0; len -= n; @@ -210,7 +210,7 @@ bcm_infallible BCM_sha512_update(SHA512_CTX *c, const void *in_data, c->num = (int)len; } - return bcm_infallible_approved; + return bcm_infallible::approved; } bcm_infallible BCM_sha512_final(uint8_t out[BCM_SHA512_DIGEST_LENGTH], @@ -221,7 +221,7 @@ bcm_infallible BCM_sha512_final(uint8_t out[BCM_SHA512_DIGEST_LENGTH], // // TODO(davidben): Add an assert and fix code to match them up. 
sha512_final_impl(out, sha->md_len, sha); - return bcm_infallible_approved; + return bcm_infallible::approved; } static void sha512_final_impl(uint8_t *out, size_t md_len, SHA512_CTX *sha) { diff --git a/Sources/CCryptoBoringSSL/crypto/fipsmodule/tls/internal.h b/Sources/CCryptoBoringSSL/crypto/fipsmodule/tls/internal.h index 46dadf88..e712bbed 100644 --- a/Sources/CCryptoBoringSSL/crypto/fipsmodule/tls/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/fipsmodule/tls/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/hpke/hpke.cc b/Sources/CCryptoBoringSSL/crypto/hpke/hpke.cc index 592564ac..0282c69a 100644 --- a/Sources/CCryptoBoringSSL/crypto/hpke/hpke.cc +++ b/Sources/CCryptoBoringSSL/crypto/hpke/hpke.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/hrss/hrss.cc b/Sources/CCryptoBoringSSL/crypto/hrss/hrss.cc index 49df5279..80f37640 100644 --- a/Sources/CCryptoBoringSSL/crypto/hrss/hrss.cc +++ b/Sources/CCryptoBoringSSL/crypto/hrss/hrss.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/hrss/internal.h b/Sources/CCryptoBoringSSL/crypto/hrss/internal.h index 0d75177e..bff3d51c 100644 --- a/Sources/CCryptoBoringSSL/crypto/hrss/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/hrss/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. 
+/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/internal.h b/Sources/CCryptoBoringSSL/crypto/internal.h index b6a5d168..b18cf08d 100644 --- a/Sources/CCryptoBoringSSL/crypto/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/internal.h @@ -1537,7 +1537,9 @@ OPENSSL_INLINE int CRYPTO_is_ARMv8_SHA512_capable(void) { // 3: aes_hw_set_encrypt_key // 4: vpaes_encrypt // 5: vpaes_set_encrypt_key -extern uint8_t BORINGSSL_function_hit[7]; +// 6: aes_gcm_enc_update_vaes_avx10_256 +// 7: aes_gcm_enc_update_vaes_avx10_512 +extern uint8_t BORINGSSL_function_hit[8]; #endif // BORINGSSL_DISPATCH_TEST // OPENSSL_vasprintf_internal is just like |vasprintf(3)|. If |system_malloc| is @@ -1558,49 +1560,33 @@ OPENSSL_EXPORT int OPENSSL_vasprintf_internal(char **str, const char *format, // bit. |carry| must be zero or one. #if OPENSSL_HAS_BUILTIN(__builtin_addc) -template -struct CRYPTO_addc_impl { - static_assert(sizeof(T) == 0, "Unsupported type for addc operation"); -}; - -template <> -struct CRYPTO_addc_impl { - static unsigned int add(unsigned int x, unsigned int y, unsigned int carry, - unsigned int *out_carry) { - return __builtin_addc(x, y, carry, out_carry); - } -}; - -template <> -struct CRYPTO_addc_impl { - static unsigned long add(unsigned long x, unsigned long y, - unsigned long carry, unsigned long *out_carry) { - return __builtin_addcl(x, y, carry, out_carry); - } -}; +inline unsigned int CRYPTO_addc_impl(unsigned int x, unsigned int y, + unsigned int carry, + unsigned int *out_carry) { + return __builtin_addc(x, y, carry, out_carry); +} -template <> -struct CRYPTO_addc_impl { - static unsigned long long add(unsigned long long x, unsigned long long y, - unsigned long long carry, - unsigned long long *out_carry) { - return __builtin_addcll(x, y, carry, out_carry); - } -}; +inline unsigned 
long CRYPTO_addc_impl(unsigned long x, unsigned long y, + unsigned long carry, + unsigned long *out_carry) { + return __builtin_addcl(x, y, carry, out_carry); +} -template -inline T CRYPTO_addc(T x, T y, T carry, T *out_carry) { - return CRYPTO_addc_impl::add(x, y, carry, out_carry); +inline unsigned long long CRYPTO_addc_impl(unsigned long long x, + unsigned long long y, + unsigned long long carry, + unsigned long long *out_carry) { + return __builtin_addcll(x, y, carry, out_carry); } inline uint32_t CRYPTO_addc_u32(uint32_t x, uint32_t y, uint32_t carry, uint32_t *out_carry) { - return CRYPTO_addc(x, y, carry, out_carry); + return CRYPTO_addc_impl(x, y, carry, out_carry); } inline uint64_t CRYPTO_addc_u64(uint64_t x, uint64_t y, uint64_t carry, uint64_t *out_carry) { - return CRYPTO_addc(x, y, carry, out_carry); + return CRYPTO_addc_impl(x, y, carry, out_carry); } #else @@ -1638,49 +1624,33 @@ static inline uint64_t CRYPTO_addc_u64(uint64_t x, uint64_t y, uint64_t carry, // bit. |borrow| must be zero or one. 
#if OPENSSL_HAS_BUILTIN(__builtin_subc) -template -struct CRYPTO_subc_impl { - static_assert(sizeof(T) == 0, "Unsupported type for subc operation"); -}; - -template <> -struct CRYPTO_subc_impl { - static unsigned int sub(unsigned int x, unsigned int y, unsigned int borrow, - unsigned int *out_borrow) { - return __builtin_subc(x, y, borrow, out_borrow); - } -}; - -template <> -struct CRYPTO_subc_impl { - static unsigned long sub(unsigned long x, unsigned long y, - unsigned long borrow, unsigned long *out_borrow) { - return __builtin_subcl(x, y, borrow, out_borrow); - } -}; +inline unsigned int CRYPTO_subc_impl(unsigned int x, unsigned int y, + unsigned int borrow, + unsigned int *out_borrow) { + return __builtin_subc(x, y, borrow, out_borrow); +} -template <> -struct CRYPTO_subc_impl { - static unsigned long long sub(unsigned long long x, unsigned long long y, - unsigned long long borrow, - unsigned long long *out_borrow) { - return __builtin_subcll(x, y, borrow, out_borrow); - } -}; +inline unsigned long CRYPTO_subc_impl(unsigned long x, unsigned long y, + unsigned long borrow, + unsigned long *out_borrow) { + return __builtin_subcl(x, y, borrow, out_borrow); +} -template -inline T CRYPTO_subc(T x, T y, T borrow, T *out_borrow) { - return CRYPTO_subc_impl::sub(x, y, borrow, out_borrow); +inline unsigned long long CRYPTO_subc_impl(unsigned long long x, + unsigned long long y, + unsigned long long borrow, + unsigned long long *out_borrow) { + return __builtin_subcll(x, y, borrow, out_borrow); } inline uint32_t CRYPTO_subc_u32(uint32_t x, uint32_t y, uint32_t borrow, uint32_t *out_borrow) { - return CRYPTO_subc(x, y, borrow, out_borrow); + return CRYPTO_subc_impl(x, y, borrow, out_borrow); } inline uint64_t CRYPTO_subc_u64(uint64_t x, uint64_t y, uint64_t borrow, uint64_t *out_borrow) { - return CRYPTO_subc(x, y, borrow, out_borrow); + return CRYPTO_subc_impl(x, y, borrow, out_borrow); } #else diff --git a/Sources/CCryptoBoringSSL/crypto/kyber/internal.h 
b/Sources/CCryptoBoringSSL/crypto/kyber/internal.h index bb95cfe4..bdd49509 100644 --- a/Sources/CCryptoBoringSSL/crypto/kyber/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/kyber/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/kyber/kyber.cc b/Sources/CCryptoBoringSSL/crypto/kyber/kyber.cc index bd860118..820d8ca8 100644 --- a/Sources/CCryptoBoringSSL/crypto/kyber/kyber.cc +++ b/Sources/CCryptoBoringSSL/crypto/kyber/kyber.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -21,8 +21,8 @@ #include #include +#include "../fipsmodule/keccak/internal.h" #include "../internal.h" -#include "../keccak/internal.h" #include "./internal.h" diff --git a/Sources/CCryptoBoringSSL/crypto/md4/md4.cc b/Sources/CCryptoBoringSSL/crypto/md4/md4.cc index f50ceb04..2b5e1368 100644 --- a/Sources/CCryptoBoringSSL/crypto/md4/md4.cc +++ b/Sources/CCryptoBoringSSL/crypto/md4/md4.cc @@ -59,7 +59,7 @@ #include #include -#include "../crypto/fipsmodule/digest/md32_common.h" +#include "../fipsmodule/digest/md32_common.h" #include "../internal.h" @@ -83,7 +83,8 @@ int MD4_Init(MD4_CTX *md4) { return 1; } -void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num); +static void md4_block_data_order(uint32_t *state, const uint8_t *data, + size_t num); void MD4_Transform(MD4_CTX *c, const uint8_t data[MD4_CBLOCK]) { md4_block_data_order(c->h, data, 1); @@ -132,7 +133,8 @@ int MD4_Final(uint8_t out[MD4_DIGEST_LENGTH], MD4_CTX *c) { (a) = CRYPTO_rotl_u32(a, s); \ } while (0) -void md4_block_data_order(uint32_t *state, const uint8_t *data, 
size_t num) { +static void md4_block_data_order(uint32_t *state, const uint8_t *data, + size_t num) { uint32_t A, B, C, D; uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15; diff --git a/Sources/CCryptoBoringSSL/crypto/md5/internal.h b/Sources/CCryptoBoringSSL/crypto/md5/internal.h index 21cc7b10..8c96935e 100644 --- a/Sources/CCryptoBoringSSL/crypto/md5/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/md5/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/mldsa/internal.h b/Sources/CCryptoBoringSSL/crypto/mldsa/internal.h deleted file mode 100644 index 874d65d7..00000000 --- a/Sources/CCryptoBoringSSL/crypto/mldsa/internal.h +++ /dev/null @@ -1,76 +0,0 @@ -/* Copyright (c) 2024, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -#ifndef OPENSSL_HEADER_CRYPTO_MLDSA_INTERNAL_H -#define OPENSSL_HEADER_CRYPTO_MLDSA_INTERNAL_H - -#include -#include - -#if defined(__cplusplus) -extern "C" { -#endif - - -// MLDSA_SIGNATURE_RANDOMIZER_BYTES is the number of bytes of uniformly -// random entropy necessary to generate a signature in randomized mode. -#define MLDSA_SIGNATURE_RANDOMIZER_BYTES 32 - - -// ML-DSA-65 - -// MLDSA65_generate_key_external_entropy generates a public/private key pair -// using the given seed, writes the encoded public key to -// |out_encoded_public_key| and sets |out_private_key| to the private key. -// It returns 1 on success and 0 on failure. -OPENSSL_EXPORT int MLDSA65_generate_key_external_entropy( - uint8_t out_encoded_public_key[MLDSA65_PUBLIC_KEY_BYTES], - struct MLDSA65_private_key *out_private_key, - const uint8_t entropy[MLDSA_SEED_BYTES]); - -// MLDSA65_sign_internal signs |msg| using |private_key| and writes the -// signature to |out_encoded_signature|. The |context_prefix| and |context| are -// prefixed to the message, in that order, before signing. The |randomizer| -// value can be set to zero bytes in order to make a deterministic signature, or -// else filled with entropy for the usual |MLDSA_sign| behavior. It returns 1 on -// success and 0 on error. -OPENSSL_EXPORT int MLDSA65_sign_internal( - uint8_t out_encoded_signature[MLDSA65_SIGNATURE_BYTES], - const struct MLDSA65_private_key *private_key, const uint8_t *msg, - size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, - const uint8_t *context, size_t context_len, - const uint8_t randomizer[MLDSA_SIGNATURE_RANDOMIZER_BYTES]); - -// MLDSA65_verify_internal verifies that |encoded_signature| is a valid -// signature of |msg| by |public_key|. The |context_prefix| and |context| are -// prefixed to the message before verification, in that order. It returns 1 on -// success and 0 on error. 
-OPENSSL_EXPORT int MLDSA65_verify_internal( - const struct MLDSA65_public_key *public_key, - const uint8_t encoded_signature[MLDSA65_SIGNATURE_BYTES], - const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, - size_t context_prefix_len, const uint8_t *context, size_t context_len); - -// MLDSA65_marshal_private_key serializes |private_key| to |out| in the -// NIST format for ML-DSA-65 private keys. It returns 1 on success or 0 -// on allocation error. -OPENSSL_EXPORT int MLDSA65_marshal_private_key( - CBB *out, const struct MLDSA65_private_key *private_key); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_MLDSA_INTERNAL_H diff --git a/Sources/CCryptoBoringSSL/crypto/mldsa/mldsa.cc b/Sources/CCryptoBoringSSL/crypto/mldsa/mldsa.cc index 6ca75bde..cfc12510 100644 --- a/Sources/CCryptoBoringSSL/crypto/mldsa/mldsa.cc +++ b/Sources/CCryptoBoringSSL/crypto/mldsa/mldsa.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,1733 +14,45 @@ #include -#include +#include "../fipsmodule/bcm_interface.h" -#include -#include +static_assert(sizeof(BCM_mldsa65_private_key) == sizeof(MLDSA65_private_key), + ""); +static_assert(alignof(BCM_mldsa65_private_key) == alignof(MLDSA65_private_key), + ""); +static_assert(sizeof(BCM_mldsa65_public_key) == sizeof(MLDSA65_public_key), ""); +static_assert(alignof(BCM_mldsa65_public_key) == alignof(MLDSA65_public_key), + ""); +static_assert(MLDSA_SEED_BYTES == BCM_MLDSA_SEED_BYTES, ""); +static_assert(MLDSA65_PRIVATE_KEY_BYTES == BCM_MLDSA65_PRIVATE_KEY_BYTES, ""); +static_assert(MLDSA65_PUBLIC_KEY_BYTES == BCM_MLDSA65_PUBLIC_KEY_BYTES, ""); +static_assert(MLDSA65_SIGNATURE_BYTES == BCM_MLDSA65_SIGNATURE_BYTES, ""); -#include -#include -#include - -#include "../internal.h" -#include 
"../keccak/internal.h" -#include "./internal.h" - -namespace { - -constexpr int kDegree = 256; -constexpr int kRhoBytes = 32; -constexpr int kSigmaBytes = 64; -constexpr int kKBytes = 32; -constexpr int kTrBytes = 64; -constexpr int kMuBytes = 64; -constexpr int kRhoPrimeBytes = 64; - -// 2^23 - 2^13 + 1 -constexpr uint32_t kPrime = 8380417; -// Inverse of -kPrime modulo 2^32 -constexpr uint32_t kPrimeNegInverse = 4236238847; -constexpr int kDroppedBits = 13; -constexpr uint32_t kHalfPrime = (kPrime - 1) / 2; -constexpr uint32_t kGamma2 = (kPrime - 1) / 32; -// 256^-1 mod kPrime, in Montgomery form. -constexpr uint32_t kInverseDegreeMontgomery = 41978; - -// Constants that vary depending on ML-DSA size. -// -// These are implemented as templates which take the K parameter to distinguish -// the ML-DSA sizes. (At the time of writing, `if constexpr` was not available.) -// -// TODO(crbug.com/42290600): Switch this to `if constexpr` when C++17 is -// available. - -template -constexpr size_t public_key_bytes(); - -template <> -constexpr size_t public_key_bytes<6>() { - return MLDSA65_PUBLIC_KEY_BYTES; -} - -template -constexpr size_t signature_bytes(); - -template <> -constexpr size_t signature_bytes<6>() { - return MLDSA65_SIGNATURE_BYTES; -} - -template -constexpr int tau(); - -template <> -constexpr int tau<6>() { - return 49; -} - -template -constexpr int lambda_bytes(); - -template <> -constexpr int lambda_bytes<6>() { - return 192 / 8; -} - -template -constexpr int gamma1(); - -template <> -constexpr int gamma1<6>() { - return 1 << 19; -} - -template -constexpr int beta(); - -template <> -constexpr int beta<6>() { - return 196; -} - -template -constexpr int omega(); - -template <> -constexpr int omega<6>() { - return 55; -} - -template -constexpr int eta(); - -template <> -constexpr int eta<6>() { - return 4; -} - -template -constexpr int plus_minus_eta_bitlen(); - -template <> -constexpr int plus_minus_eta_bitlen<6>() { - return 4; -} - -// Fundamental types. 
- -typedef struct scalar { - uint32_t c[kDegree]; -} scalar; - -template -struct vector { - scalar v[K]; -}; - -template -struct matrix { - scalar v[K][L]; -}; - -/* Arithmetic */ - -// This bit of Python will be referenced in some of the following comments: -// -// q = 8380417 -// # Inverse of -q modulo 2^32 -// q_neg_inverse = 4236238847 -// # 2^64 modulo q -// montgomery_square = 2365951 -// -// def bitreverse(i): -// ret = 0 -// for n in range(8): -// bit = i & 1 -// ret <<= 1 -// ret |= bit -// i >>= 1 -// return ret -// -// def montgomery_reduce(x): -// a = (x * q_neg_inverse) % 2**32 -// b = x + a * q -// assert b & 0xFFFF_FFFF == 0 -// c = b >> 32 -// assert c < q -// return c -// -// def montgomery_transform(x): -// return montgomery_reduce(x * montgomery_square) - -// kNTTRootsMontgomery = [ -// montgomery_transform(pow(1753, bitreverse(i), q)) for i in range(256) -// ] -static const uint32_t kNTTRootsMontgomery[256] = { - 4193792, 25847, 5771523, 7861508, 237124, 7602457, 7504169, 466468, - 1826347, 2353451, 8021166, 6288512, 3119733, 5495562, 3111497, 2680103, - 2725464, 1024112, 7300517, 3585928, 7830929, 7260833, 2619752, 6271868, - 6262231, 4520680, 6980856, 5102745, 1757237, 8360995, 4010497, 280005, - 2706023, 95776, 3077325, 3530437, 6718724, 4788269, 5842901, 3915439, - 4519302, 5336701, 3574422, 5512770, 3539968, 8079950, 2348700, 7841118, - 6681150, 6736599, 3505694, 4558682, 3507263, 6239768, 6779997, 3699596, - 811944, 531354, 954230, 3881043, 3900724, 5823537, 2071892, 5582638, - 4450022, 6851714, 4702672, 5339162, 6927966, 3475950, 2176455, 6795196, - 7122806, 1939314, 4296819, 7380215, 5190273, 5223087, 4747489, 126922, - 3412210, 7396998, 2147896, 2715295, 5412772, 4686924, 7969390, 5903370, - 7709315, 7151892, 8357436, 7072248, 7998430, 1349076, 1852771, 6949987, - 5037034, 264944, 508951, 3097992, 44288, 7280319, 904516, 3958618, - 4656075, 8371839, 1653064, 5130689, 2389356, 8169440, 759969, 7063561, - 189548, 4827145, 3159746, 
6529015, 5971092, 8202977, 1315589, 1341330, - 1285669, 6795489, 7567685, 6940675, 5361315, 4499357, 4751448, 3839961, - 2091667, 3407706, 2316500, 3817976, 5037939, 2244091, 5933984, 4817955, - 266997, 2434439, 7144689, 3513181, 4860065, 4621053, 7183191, 5187039, - 900702, 1859098, 909542, 819034, 495491, 6767243, 8337157, 7857917, - 7725090, 5257975, 2031748, 3207046, 4823422, 7855319, 7611795, 4784579, - 342297, 286988, 5942594, 4108315, 3437287, 5038140, 1735879, 203044, - 2842341, 2691481, 5790267, 1265009, 4055324, 1247620, 2486353, 1595974, - 4613401, 1250494, 2635921, 4832145, 5386378, 1869119, 1903435, 7329447, - 7047359, 1237275, 5062207, 6950192, 7929317, 1312455, 3306115, 6417775, - 7100756, 1917081, 5834105, 7005614, 1500165, 777191, 2235880, 3406031, - 7838005, 5548557, 6709241, 6533464, 5796124, 4656147, 594136, 4603424, - 6366809, 2432395, 2454455, 8215696, 1957272, 3369112, 185531, 7173032, - 5196991, 162844, 1616392, 3014001, 810149, 1652634, 4686184, 6581310, - 5341501, 3523897, 3866901, 269760, 2213111, 7404533, 1717735, 472078, - 7953734, 1723600, 6577327, 1910376, 6712985, 7276084, 8119771, 4546524, - 5441381, 6144432, 7959518, 6094090, 183443, 7403526, 1612842, 4834730, - 7826001, 3919660, 8332111, 7018208, 3937738, 1400424, 7534263, 1976782}; - -// Reduces x mod kPrime in constant time, where 0 <= x < 2*kPrime. -static uint32_t reduce_once(uint32_t x) { - declassify_assert(x < 2 * kPrime); - // return x < kPrime ? x : x - kPrime; - return constant_time_select_int(constant_time_lt_w(x, kPrime), x, x - kPrime); -} - -// Returns the absolute value in constant time. -static uint32_t abs_signed(uint32_t x) { - // return is_positive(x) ? x : -x; - // Note: MSVC doesn't like applying the unary minus operator to unsigned types - // (warning C4146), so we write the negation as a bitwise not plus one - // (assuming two's complement representation). 
- return constant_time_select_int(constant_time_lt_w(x, 0x80000000), x, 0u - x); -} - -// Returns the absolute value modulo kPrime. -static uint32_t abs_mod_prime(uint32_t x) { - declassify_assert(x < kPrime); - // return x > kHalfPrime ? kPrime - x : x; - return constant_time_select_int(constant_time_lt_w(kHalfPrime, x), kPrime - x, - x); -} - -// Returns the maximum of two values in constant time. -static uint32_t maximum(uint32_t x, uint32_t y) { - // return x < y ? y : x; - return constant_time_select_int(constant_time_lt_w(x, y), y, x); -} - -static uint32_t mod_sub(uint32_t a, uint32_t b) { - declassify_assert(a < kPrime); - declassify_assert(b < kPrime); - return reduce_once(kPrime + a - b); -} - -static void scalar_add(scalar *out, const scalar *lhs, const scalar *rhs) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = reduce_once(lhs->c[i] + rhs->c[i]); - } -} - -static void scalar_sub(scalar *out, const scalar *lhs, const scalar *rhs) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = mod_sub(lhs->c[i], rhs->c[i]); - } -} - -static uint32_t reduce_montgomery(uint64_t x) { - declassify_assert(x <= ((uint64_t)kPrime << 32)); - uint64_t a = (uint32_t)x * kPrimeNegInverse; - uint64_t b = x + a * kPrime; - declassify_assert((b & 0xffffffff) == 0); - uint32_t c = b >> 32; - return reduce_once(c); -} - -// Multiply two scalars in the number theoretically transformed state. -static void scalar_mult(scalar *out, const scalar *lhs, const scalar *rhs) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = reduce_montgomery((uint64_t)lhs->c[i] * (uint64_t)rhs->c[i]); - } -} - -// In place number theoretic transform of a given scalar. -// -// FIPS 204, Algorithm 41 (`NTT`). 
-static void scalar_ntt(scalar *s) { - // Step: 1, 2, 4, 8, ..., 128 - // Offset: 128, 64, 32, 16, ..., 1 - int offset = kDegree; - for (int step = 1; step < kDegree; step <<= 1) { - offset >>= 1; - int k = 0; - for (int i = 0; i < step; i++) { - assert(k == 2 * offset * i); - const uint32_t step_root = kNTTRootsMontgomery[step + i]; - for (int j = k; j < k + offset; j++) { - uint32_t even = s->c[j]; - // |reduce_montgomery| works on values up to kPrime*R and R > 2*kPrime. - // |step_root| < kPrime because it's static data. |s->c[...]| is < - // kPrime by the invariants of that struct. - uint32_t odd = - reduce_montgomery((uint64_t)step_root * (uint64_t)s->c[j + offset]); - s->c[j] = reduce_once(odd + even); - s->c[j + offset] = mod_sub(even, odd); - } - k += 2 * offset; - } - } -} - -// In place inverse number theoretic transform of a given scalar. -// -// FIPS 204, Algorithm 42 (`NTT^-1`). -static void scalar_inverse_ntt(scalar *s) { - // Step: 128, 64, 32, 16, ..., 1 - // Offset: 1, 2, 4, 8, ..., 128 - int step = kDegree; - for (int offset = 1; offset < kDegree; offset <<= 1) { - step >>= 1; - int k = 0; - for (int i = 0; i < step; i++) { - assert(k == 2 * offset * i); - const uint32_t step_root = - kPrime - kNTTRootsMontgomery[step + (step - 1 - i)]; - for (int j = k; j < k + offset; j++) { - uint32_t even = s->c[j]; - uint32_t odd = s->c[j + offset]; - s->c[j] = reduce_once(odd + even); - - // |reduce_montgomery| works on values up to kPrime*R and R > 2*kPrime. - // kPrime + even < 2*kPrime because |even| < kPrime, by the invariants - // of that structure. Thus kPrime + even - odd < 2*kPrime because odd >= - // 0, because it's unsigned and less than kPrime. Lastly step_root < - // kPrime, because |kNTTRootsMontgomery| is static data. 
- s->c[j + offset] = reduce_montgomery((uint64_t)step_root * - (uint64_t)(kPrime + even - odd)); - } - k += 2 * offset; - } - } - for (int i = 0; i < kDegree; i++) { - s->c[i] = reduce_montgomery((uint64_t)s->c[i] * - (uint64_t)kInverseDegreeMontgomery); - } -} - -template -static void vector_zero(vector *out) { - OPENSSL_memset(out, 0, sizeof(*out)); -} - -template -static void vector_add(vector *out, const vector *lhs, - const vector *rhs) { - for (int i = 0; i < X; i++) { - scalar_add(&out->v[i], &lhs->v[i], &rhs->v[i]); - } -} - -template -static void vector_sub(vector *out, const vector *lhs, - const vector *rhs) { - for (int i = 0; i < X; i++) { - scalar_sub(&out->v[i], &lhs->v[i], &rhs->v[i]); - } -} - -template -static void vector_mult_scalar(vector *out, const vector *lhs, - const scalar *rhs) { - for (int i = 0; i < X; i++) { - scalar_mult(&out->v[i], &lhs->v[i], rhs); - } -} - -template -static void vector_ntt(vector *a) { - for (int i = 0; i < X; i++) { - scalar_ntt(&a->v[i]); - } -} - -template -static void vector_inverse_ntt(vector *a) { - for (int i = 0; i < X; i++) { - scalar_inverse_ntt(&a->v[i]); - } -} - -template -static void matrix_mult(vector *out, const matrix *m, - const vector *a) { - vector_zero(out); - for (int i = 0; i < K; i++) { - for (int j = 0; j < L; j++) { - scalar product; - scalar_mult(&product, &m->v[i][j], &a->v[j]); - scalar_add(&out->v[i], &out->v[i], &product); - } - } -} - -/* Rounding & hints */ - -// FIPS 204, Algorithm 35 (`Power2Round`). -static void power2_round(uint32_t *r1, uint32_t *r0, uint32_t r) { - *r1 = r >> kDroppedBits; - *r0 = r - (*r1 << kDroppedBits); - - uint32_t r0_adjusted = mod_sub(*r0, 1 << kDroppedBits); - uint32_t r1_adjusted = *r1 + 1; - - // Mask is set iff r0 > 2^(dropped_bits - 1). - crypto_word_t mask = - constant_time_lt_w((uint32_t)(1 << (kDroppedBits - 1)), *r0); - // r0 = mask ? r0_adjusted : r0 - *r0 = constant_time_select_int(mask, r0_adjusted, *r0); - // r1 = mask ? 
r1_adjusted : r1 - *r1 = constant_time_select_int(mask, r1_adjusted, *r1); -} - -// Scale back previously rounded value. -static void scale_power2_round(uint32_t *out, uint32_t r1) { - // Pre-condition: 0 <= r1 <= 2^10 - 1 - assert(r1 < (1u << 10)); - - *out = r1 << kDroppedBits; - - // Post-condition: 0 <= out <= 2^23 - 2^13 = kPrime - 1 - assert(*out < kPrime); -} - -// FIPS 204, Algorithm 37 (`HighBits`). -static uint32_t high_bits(uint32_t x) { - // Reference description (given 0 <= x < q): - // - // ``` - // int32_t r0 = x mod+- (2 * kGamma2); - // if (x - r0 == q - 1) { - // return 0; - // } else { - // return (x - r0) / (2 * kGamma2); - // } - // ``` - // - // Below is the formula taken from the reference implementation. - // - // Here, kGamma2 == 2^18 - 2^8 - // This returns ((ceil(x / 2^7) * (2^10 + 1) + 2^21) / 2^22) mod 2^4 - uint32_t r1 = (x + 127) >> 7; - r1 = (r1 * 1025 + (1 << 21)) >> 22; - r1 &= 15; - return r1; -} - -// FIPS 204, Algorithm 36 (`Decompose`). -static void decompose(uint32_t *r1, int32_t *r0, uint32_t r) { - *r1 = high_bits(r); - - *r0 = r; - *r0 -= *r1 * 2 * (int32_t)kGamma2; - *r0 -= (((int32_t)kHalfPrime - *r0) >> 31) & (int32_t)kPrime; -} - -// FIPS 204, Algorithm 38 (`LowBits`). -static int32_t low_bits(uint32_t x) { - uint32_t r1; - int32_t r0; - decompose(&r1, &r0, x); - return r0; -} - -// FIPS 204, Algorithm 39 (`MakeHint`). -// -// In the spec this takes two arguments, z and r, and is called with -// z = -ct0 -// r = w - cs2 + ct0 -// -// It then computes HighBits (algorithm 37) of z and z+r. But z+r is just w - -// cs2, so this takes three arguments and saves an addition. -static int32_t make_hint(uint32_t ct0, uint32_t cs2, uint32_t w) { - uint32_t r_plus_z = mod_sub(w, cs2); - uint32_t r = reduce_once(r_plus_z + ct0); - return high_bits(r) != high_bits(r_plus_z); -} - -// FIPS 204, Algorithm 40 (`UseHint`). 
-static uint32_t use_hint_vartime(uint32_t h, uint32_t r) { - uint32_t r1; - int32_t r0; - decompose(&r1, &r0, r); - - if (h) { - if (r0 > 0) { - // m = 16, thus |mod m| in the spec turns into |& 15|. - return (r1 + 1) & 15; - } else { - return (r1 - 1) & 15; - } - } - return r1; -} - -static void scalar_power2_round(scalar *s1, scalar *s0, const scalar *s) { - for (int i = 0; i < kDegree; i++) { - power2_round(&s1->c[i], &s0->c[i], s->c[i]); - } -} - -static void scalar_scale_power2_round(scalar *out, const scalar *in) { - for (int i = 0; i < kDegree; i++) { - scale_power2_round(&out->c[i], in->c[i]); - } -} - -static void scalar_high_bits(scalar *out, const scalar *in) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = high_bits(in->c[i]); - } -} - -static void scalar_low_bits(scalar *out, const scalar *in) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = low_bits(in->c[i]); - } -} - -static void scalar_max(uint32_t *max, const scalar *s) { - for (int i = 0; i < kDegree; i++) { - uint32_t abs = abs_mod_prime(s->c[i]); - *max = maximum(*max, abs); - } -} - -static void scalar_max_signed(uint32_t *max, const scalar *s) { - for (int i = 0; i < kDegree; i++) { - uint32_t abs = abs_signed(s->c[i]); - *max = maximum(*max, abs); - } -} - -static void scalar_make_hint(scalar *out, const scalar *ct0, const scalar *cs2, - const scalar *w) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = make_hint(ct0->c[i], cs2->c[i], w->c[i]); - } -} - -static void scalar_use_hint_vartime(scalar *out, const scalar *h, - const scalar *r) { - for (int i = 0; i < kDegree; i++) { - out->c[i] = use_hint_vartime(h->c[i], r->c[i]); - } -} - -template -static void vector_power2_round(vector *t1, vector *t0, - const vector *t) { - for (int i = 0; i < X; i++) { - scalar_power2_round(&t1->v[i], &t0->v[i], &t->v[i]); - } -} - -template -static void vector_scale_power2_round(vector *out, const vector *in) { - for (int i = 0; i < X; i++) { - scalar_scale_power2_round(&out->v[i], 
&in->v[i]); - } -} - -template -static void vector_high_bits(vector *out, const vector *in) { - for (int i = 0; i < X; i++) { - scalar_high_bits(&out->v[i], &in->v[i]); - } -} - -template -static void vector_low_bits(vector *out, const vector *in) { - for (int i = 0; i < X; i++) { - scalar_low_bits(&out->v[i], &in->v[i]); - } -} - -template -static uint32_t vector_max(const vector *a) { - uint32_t max = 0; - for (int i = 0; i < X; i++) { - scalar_max(&max, &a->v[i]); - } - return max; -} - -template -static uint32_t vector_max_signed(const vector *a) { - uint32_t max = 0; - for (int i = 0; i < X; i++) { - scalar_max_signed(&max, &a->v[i]); - } - return max; -} - -// The input vector contains only zeroes and ones. -template -static size_t vector_count_ones(const vector *a) { - size_t count = 0; - for (int i = 0; i < X; i++) { - for (int j = 0; j < kDegree; j++) { - count += a->v[i].c[j]; - } - } - return count; -} - -template -static void vector_make_hint(vector *out, const vector *ct0, - const vector *cs2, const vector *w) { - for (int i = 0; i < X; i++) { - scalar_make_hint(&out->v[i], &ct0->v[i], &cs2->v[i], &w->v[i]); - } -} - -template -static void vector_use_hint_vartime(vector *out, const vector *h, - const vector *r) { - for (int i = 0; i < X; i++) { - scalar_use_hint_vartime(&out->v[i], &h->v[i], &r->v[i]); - } -} - -/* Bit packing */ - -// FIPS 204, Algorithm 16 (`SimpleBitPack`). Specialized to bitlen(b) = 4. -static void scalar_encode_4(uint8_t out[128], const scalar *s) { - // Every two elements lands on a byte boundary. - static_assert(kDegree % 2 == 0, "kDegree must be a multiple of 2"); - for (int i = 0; i < kDegree / 2; i++) { - uint32_t a = s->c[2 * i]; - uint32_t b = s->c[2 * i + 1]; - declassify_assert(a < 16); - declassify_assert(b < 16); - out[i] = a | (b << 4); - } -} - -// FIPS 204, Algorithm 16 (`SimpleBitPack`). Specialized to bitlen(b) = 10. 
-static void scalar_encode_10(uint8_t out[320], const scalar *s) { - // Every four elements lands on a byte boundary. - static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); - for (int i = 0; i < kDegree / 4; i++) { - uint32_t a = s->c[4 * i]; - uint32_t b = s->c[4 * i + 1]; - uint32_t c = s->c[4 * i + 2]; - uint32_t d = s->c[4 * i + 3]; - declassify_assert(a < 1024); - declassify_assert(b < 1024); - declassify_assert(c < 1024); - declassify_assert(d < 1024); - out[5 * i] = (uint8_t)a; - out[5 * i + 1] = (uint8_t)((a >> 8) | (b << 2)); - out[5 * i + 2] = (uint8_t)((b >> 6) | (c << 4)); - out[5 * i + 3] = (uint8_t)((c >> 4) | (d << 6)); - out[5 * i + 4] = (uint8_t)(d >> 2); - } -} - -// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(b) = 4 and b = 4. -static void scalar_encode_signed_4_4(uint8_t out[128], const scalar *s) { - // Every two elements lands on a byte boundary. - static_assert(kDegree % 2 == 0, "kDegree must be a multiple of 2"); - for (int i = 0; i < kDegree / 2; i++) { - uint32_t a = mod_sub(4, s->c[2 * i]); - uint32_t b = mod_sub(4, s->c[2 * i + 1]); - declassify_assert(a < 16); - declassify_assert(b < 16); - out[i] = a | (b << 4); - } -} - -// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(b) = 13 and b = -// 2^12. -static void scalar_encode_signed_13_12(uint8_t out[416], const scalar *s) { - static const uint32_t kMax = 1u << 12; - // Every two elements lands on a byte boundary. 
- static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); - for (int i = 0; i < kDegree / 8; i++) { - uint32_t a = mod_sub(kMax, s->c[8 * i]); - uint32_t b = mod_sub(kMax, s->c[8 * i + 1]); - uint32_t c = mod_sub(kMax, s->c[8 * i + 2]); - uint32_t d = mod_sub(kMax, s->c[8 * i + 3]); - uint32_t e = mod_sub(kMax, s->c[8 * i + 4]); - uint32_t f = mod_sub(kMax, s->c[8 * i + 5]); - uint32_t g = mod_sub(kMax, s->c[8 * i + 6]); - uint32_t h = mod_sub(kMax, s->c[8 * i + 7]); - declassify_assert(a < (1u << 13)); - declassify_assert(b < (1u << 13)); - declassify_assert(c < (1u << 13)); - declassify_assert(d < (1u << 13)); - declassify_assert(e < (1u << 13)); - declassify_assert(f < (1u << 13)); - declassify_assert(g < (1u << 13)); - declassify_assert(h < (1u << 13)); - a |= b << 13; - a |= c << 26; - c >>= 6; - c |= d << 7; - c |= e << 20; - e >>= 12; - e |= f << 1; - e |= g << 14; - e |= h << 27; - h >>= 5; - OPENSSL_memcpy(&out[13 * i], &a, sizeof(a)); - OPENSSL_memcpy(&out[13 * i + 4], &c, sizeof(c)); - OPENSSL_memcpy(&out[13 * i + 8], &e, sizeof(e)); - OPENSSL_memcpy(&out[13 * i + 12], &h, 1); - } -} - -// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(b) = 20 and b = -// 2^19. -static void scalar_encode_signed_20_19(uint8_t out[640], const scalar *s) { - static const uint32_t kMax = 1u << 19; - // Every two elements lands on a byte boundary. 
- static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); - for (int i = 0; i < kDegree / 4; i++) { - uint32_t a = mod_sub(kMax, s->c[4 * i]); - uint32_t b = mod_sub(kMax, s->c[4 * i + 1]); - uint32_t c = mod_sub(kMax, s->c[4 * i + 2]); - uint32_t d = mod_sub(kMax, s->c[4 * i + 3]); - declassify_assert(a < (1u << 20)); - declassify_assert(b < (1u << 20)); - declassify_assert(c < (1u << 20)); - declassify_assert(d < (1u << 20)); - a |= b << 20; - b >>= 12; - b |= c << 8; - b |= d << 28; - d >>= 4; - OPENSSL_memcpy(&out[10 * i], &a, sizeof(a)); - OPENSSL_memcpy(&out[10 * i + 4], &b, sizeof(b)); - OPENSSL_memcpy(&out[10 * i + 8], &d, 2); - } -} - -// FIPS 204, Algorithm 17 (`BitPack`). -static void scalar_encode_signed(uint8_t *out, const scalar *s, int bits, - uint32_t max) { - if (bits == 4) { - assert(max == 4); - scalar_encode_signed_4_4(out, s); - } else if (bits == 20) { - assert(max == 1u << 19); - scalar_encode_signed_20_19(out, s); - } else { - assert(bits == 13); - assert(max == 1u << 12); - scalar_encode_signed_13_12(out, s); - } -} - -// FIPS 204, Algorithm 18 (`SimpleBitUnpack`). Specialized for bitlen(b) == 10. -static void scalar_decode_10(scalar *out, const uint8_t in[320]) { - uint32_t v; - static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); - for (int i = 0; i < kDegree / 4; i++) { - OPENSSL_memcpy(&v, &in[5 * i], sizeof(v)); - out->c[4 * i] = v & 0x3ff; - out->c[4 * i + 1] = (v >> 10) & 0x3ff; - out->c[4 * i + 2] = (v >> 20) & 0x3ff; - out->c[4 * i + 3] = (v >> 30) | (((uint32_t)in[5 * i + 4]) << 2); - } -} - -// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 4 and b = -// 4. -static int scalar_decode_signed_4_4(scalar *out, const uint8_t in[128]) { - uint32_t v; - static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); - for (int i = 0; i < kDegree / 8; i++) { - OPENSSL_memcpy(&v, &in[4 * i], sizeof(v)); - // None of the nibbles may be >= 9. 
So if the MSB of any nibble is set, none - // of the other bits may be set. First, select all the MSBs. - const uint32_t msbs = v & 0x88888888u; - // For each nibble where the MSB is set, form a mask of all the other bits. - const uint32_t mask = (msbs >> 1) | (msbs >> 2) | (msbs >> 3); - // A nibble is only out of range in the case of invalid input, in which case - // it is okay to leak the value. - if (constant_time_declassify_int((mask & v) != 0)) { - return 0; - } - - out->c[i * 8] = mod_sub(4, v & 15); - out->c[i * 8 + 1] = mod_sub(4, (v >> 4) & 15); - out->c[i * 8 + 2] = mod_sub(4, (v >> 8) & 15); - out->c[i * 8 + 3] = mod_sub(4, (v >> 12) & 15); - out->c[i * 8 + 4] = mod_sub(4, (v >> 16) & 15); - out->c[i * 8 + 5] = mod_sub(4, (v >> 20) & 15); - out->c[i * 8 + 6] = mod_sub(4, (v >> 24) & 15); - out->c[i * 8 + 7] = mod_sub(4, v >> 28); - } - return 1; -} - -// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 13 and b = -// 2^12. -static void scalar_decode_signed_13_12(scalar *out, const uint8_t in[416]) { - static const uint32_t kMax = 1u << 12; - static const uint32_t k13Bits = (1u << 13) - 1; - static const uint32_t k7Bits = (1u << 7) - 1; - - uint32_t a, b, c; - uint8_t d; - static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8"); - for (int i = 0; i < kDegree / 8; i++) { - OPENSSL_memcpy(&a, &in[13 * i], sizeof(a)); - OPENSSL_memcpy(&b, &in[13 * i + 4], sizeof(b)); - OPENSSL_memcpy(&c, &in[13 * i + 8], sizeof(c)); - d = in[13 * i + 12]; - - // It's not possible for a 13-bit number to be out of range when the max is - // 2^12. 
- out->c[i * 8] = mod_sub(kMax, a & k13Bits); - out->c[i * 8 + 1] = mod_sub(kMax, (a >> 13) & k13Bits); - out->c[i * 8 + 2] = mod_sub(kMax, (a >> 26) | ((b & k7Bits) << 6)); - out->c[i * 8 + 3] = mod_sub(kMax, (b >> 7) & k13Bits); - out->c[i * 8 + 4] = mod_sub(kMax, (b >> 20) | ((c & 1) << 12)); - out->c[i * 8 + 5] = mod_sub(kMax, (c >> 1) & k13Bits); - out->c[i * 8 + 6] = mod_sub(kMax, (c >> 14) & k13Bits); - out->c[i * 8 + 7] = mod_sub(kMax, (c >> 27) | ((uint32_t)d) << 5); - } -} - -// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 20 and b = -// 2^19. -static void scalar_decode_signed_20_19(scalar *out, const uint8_t in[640]) { - static const uint32_t kMax = 1u << 19; - static const uint32_t k20Bits = (1u << 20) - 1; - - uint32_t a, b; - uint16_t c; - static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4"); - for (int i = 0; i < kDegree / 4; i++) { - OPENSSL_memcpy(&a, &in[10 * i], sizeof(a)); - OPENSSL_memcpy(&b, &in[10 * i + 4], sizeof(b)); - OPENSSL_memcpy(&c, &in[10 * i + 8], sizeof(c)); - - // It's not possible for a 20-bit number to be out of range when the max is - // 2^19. - out->c[i * 4] = mod_sub(kMax, a & k20Bits); - out->c[i * 4 + 1] = mod_sub(kMax, (a >> 20) | ((b & 0xff) << 12)); - out->c[i * 4 + 2] = mod_sub(kMax, (b >> 8) & k20Bits); - out->c[i * 4 + 3] = mod_sub(kMax, (b >> 28) | ((uint32_t)c) << 4); - } -} - -// FIPS 204, Algorithm 19 (`BitUnpack`). -static int scalar_decode_signed(scalar *out, const uint8_t *in, int bits, - uint32_t max) { - if (bits == 4) { - assert(max == 4); - return scalar_decode_signed_4_4(out, in); - } else if (bits == 13) { - assert(max == (1u << 12)); - scalar_decode_signed_13_12(out, in); - return 1; - } else if (bits == 20) { - assert(max == (1u << 19)); - scalar_decode_signed_20_19(out, in); - return 1; - } else { - abort(); - } -} - -/* Expansion functions */ - -// FIPS 204, Algorithm 30 (`RejNTTPoly`). -// -// Rejection samples a Keccak stream to get uniformly distributed elements. 
This -// is used for matrix expansion and only operates on public inputs. -static void scalar_from_keccak_vartime( - scalar *out, const uint8_t derived_seed[kRhoBytes + 2]) { - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake128); - BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, kRhoBytes + 2); - assert(keccak_ctx.squeeze_offset == 0); - assert(keccak_ctx.rate_bytes == 168); - static_assert(168 % 3 == 0, "block and coefficient boundaries do not align"); - - int done = 0; - while (done < kDegree) { - uint8_t block[168]; - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - for (size_t i = 0; i < sizeof(block) && done < kDegree; i += 3) { - // FIPS 204, Algorithm 14 (`CoeffFromThreeBytes`). - uint32_t value = (uint32_t)block[i] | ((uint32_t)block[i + 1] << 8) | - (((uint32_t)block[i + 2] & 0x7f) << 16); - if (value < kPrime) { - out->c[done++] = value; - } - } - } -} - -template -static bool coefficient_from_nibble(uint32_t nibble, uint32_t *result); - -template <> -bool coefficient_from_nibble<4>(uint32_t nibble, uint32_t *result) { - if (constant_time_declassify_int(nibble < 9)) { - *result = mod_sub(4, nibble); - return true; - } - return false; -} - -// FIPS 204, Algorithm 31 (`RejBoundedPoly`). -template -static void scalar_uniform(scalar *out, - const uint8_t derived_seed[kSigmaBytes + 2]) { - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, kSigmaBytes + 2); - assert(keccak_ctx.squeeze_offset == 0); - assert(keccak_ctx.rate_bytes == 136); - - int done = 0; - while (done < kDegree) { - uint8_t block[136]; - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - for (size_t i = 0; i < sizeof(block) && done < kDegree; ++i) { - uint32_t t0 = block[i] & 0x0F; - uint32_t t1 = block[i] >> 4; - // FIPS 204, Algorithm 15 (`CoefFromHalfByte`). 
Although both the input - // and output here are secret, it is OK to leak when we rejected a byte. - // Individual bytes of the SHAKE-256 stream are (indistiguishable from) - // independent of each other and the original seed, so leaking information - // about the rejected bytes does not reveal the input or output. - uint32_t v; - if (coefficient_from_nibble(t0, &v)) { - out->c[done++] = v; - } - if (done < kDegree && coefficient_from_nibble(t1, &v)) { - out->c[done++] = v; - } - } - } -} - -// FIPS 204, Algorithm 34 (`ExpandMask`), but just a single step. -static void scalar_sample_mask(scalar *out, - const uint8_t derived_seed[kRhoPrimeBytes + 2]) { - uint8_t buf[640]; - BORINGSSL_keccak(buf, sizeof(buf), derived_seed, kRhoPrimeBytes + 2, - boringssl_shake256); - - scalar_decode_signed_20_19(out, buf); -} - -// FIPS 204, Algorithm 29 (`SampleInBall`). -static void scalar_sample_in_ball_vartime(scalar *out, const uint8_t *seed, - int len, int tau) { - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, seed, len); - assert(keccak_ctx.squeeze_offset == 0); - assert(keccak_ctx.rate_bytes == 136); - - uint8_t block[136]; - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - - uint64_t signs = CRYPTO_load_u64_le(block); - int offset = 8; - // SampleInBall implements a Fisher–Yates shuffle, which unavoidably leaks - // where the zeros are by memory access pattern. Although this leak happens - // before bad signatures are rejected, this is safe. See - // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/8d8f01ac_70af3f21/ - CONSTTIME_DECLASSIFY(block + offset, sizeof(block) - offset); - - OPENSSL_memset(out, 0, sizeof(*out)); - for (size_t i = kDegree - tau; i < kDegree; i++) { - size_t byte; - for (;;) { - if (offset == 136) { - BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block)); - // See above. 
- CONSTTIME_DECLASSIFY(block, sizeof(block)); - offset = 0; - } - - byte = block[offset++]; - if (byte <= i) { - break; - } - } - - out->c[i] = out->c[byte]; - out->c[byte] = mod_sub(1, 2 * (signs & 1)); - signs >>= 1; - } -} - -// FIPS 204, Algorithm 32 (`ExpandA`). -template -static void matrix_expand(matrix *out, const uint8_t rho[kRhoBytes]) { - static_assert(K <= 0x100, "K must fit in 8 bits"); - static_assert(L <= 0x100, "L must fit in 8 bits"); - - uint8_t derived_seed[kRhoBytes + 2]; - OPENSSL_memcpy(derived_seed, rho, kRhoBytes); - for (int i = 0; i < K; i++) { - for (int j = 0; j < L; j++) { - derived_seed[kRhoBytes + 1] = (uint8_t)i; - derived_seed[kRhoBytes] = (uint8_t)j; - scalar_from_keccak_vartime(&out->v[i][j], derived_seed); - } - } -} - -// FIPS 204, Algorithm 33 (`ExpandS`). -template -static void vector_expand_short(vector *s1, vector *s2, - const uint8_t sigma[kSigmaBytes]) { - static_assert(K <= 0x100, "K must fit in 8 bits"); - static_assert(L <= 0x100, "L must fit in 8 bits"); - static_assert(K + L <= 0x100, "K+L must fit in 8 bits"); - - uint8_t derived_seed[kSigmaBytes + 2]; - OPENSSL_memcpy(derived_seed, sigma, kSigmaBytes); - derived_seed[kSigmaBytes] = 0; - derived_seed[kSigmaBytes + 1] = 0; - for (int i = 0; i < L; i++) { - scalar_uniform()>(&s1->v[i], derived_seed); - ++derived_seed[kSigmaBytes]; - } - for (int i = 0; i < K; i++) { - scalar_uniform()>(&s2->v[i], derived_seed); - ++derived_seed[kSigmaBytes]; - } -} - -// FIPS 204, Algorithm 34 (`ExpandMask`). 
-template -static void vector_expand_mask(vector *out, - const uint8_t seed[kRhoPrimeBytes], - size_t kappa) { - assert(kappa + L <= 0x10000); - - uint8_t derived_seed[kRhoPrimeBytes + 2]; - OPENSSL_memcpy(derived_seed, seed, kRhoPrimeBytes); - for (int i = 0; i < L; i++) { - size_t index = kappa + i; - derived_seed[kRhoPrimeBytes] = index & 0xFF; - derived_seed[kRhoPrimeBytes + 1] = (index >> 8) & 0xFF; - scalar_sample_mask(&out->v[i], derived_seed); - } -} - -/* Encoding */ - -// FIPS 204, Algorithm 16 (`SimpleBitPack`). -// -// Encodes an entire vector into 32*K*|bits| bytes. Note that since 256 -// (kDegree) is divisible by 8, the individual vector entries will always fill a -// whole number of bytes, so we do not need to worry about bit packing here. -template -static void vector_encode(uint8_t *out, const vector *a, int bits) { - if (bits == 4) { - for (int i = 0; i < K; i++) { - scalar_encode_4(out + i * bits * kDegree / 8, &a->v[i]); - } - } else { - assert(bits == 10); - for (int i = 0; i < K; i++) { - scalar_encode_10(out + i * bits * kDegree / 8, &a->v[i]); - } - } -} - -// FIPS 204, Algorithm 18 (`SimpleBitUnpack`). -template -static void vector_decode_10(vector *out, const uint8_t *in) { - for (int i = 0; i < K; i++) { - scalar_decode_10(&out->v[i], in + i * 10 * kDegree / 8); - } -} - -// FIPS 204, Algorithm 17 (`BitPack`). -// -// Encodes an entire vector into 32*L*|bits| bytes. Note that since 256 -// (kDegree) is divisible by 8, the individual vector entries will always fill a -// whole number of bytes, so we do not need to worry about bit packing here. 
-template -static void vector_encode_signed(uint8_t *out, const vector *a, int bits, - uint32_t max) { - for (int i = 0; i < X; i++) { - scalar_encode_signed(out + i * bits * kDegree / 8, &a->v[i], bits, max); - } -} - -template -static int vector_decode_signed(vector *out, const uint8_t *in, int bits, - uint32_t max) { - for (int i = 0; i < X; i++) { - if (!scalar_decode_signed(&out->v[i], in + i * bits * kDegree / 8, bits, - max)) { - return 0; - } - } - return 1; -} - -// FIPS 204, Algorithm 28 (`w1Encode`). -template -static void w1_encode(uint8_t out[128 * K], const vector *w1) { - vector_encode(out, w1, 4); -} - -// FIPS 204, Algorithm 20 (`HintBitPack`). -template -static void hint_bit_pack(uint8_t out[omega() + K], const vector *h) { - OPENSSL_memset(out, 0, omega() + K); - int index = 0; - for (int i = 0; i < K; i++) { - for (int j = 0; j < kDegree; j++) { - if (h->v[i].c[j]) { - // h must have at most omega() non-zero coefficients. - BSSL_CHECK(index < omega()); - out[index++] = j; - } - } - out[omega() + i] = index; - } -} - -// FIPS 204, Algorithm 21 (`HintBitUnpack`). -template -static int hint_bit_unpack(vector *h, const uint8_t in[omega() + K]) { - vector_zero(h); - int index = 0; - for (int i = 0; i < K; i++) { - const int limit = in[omega() + i]; - if (limit < index || limit > omega()) { - return 0; - } - - int last = -1; - while (index < limit) { - int byte = in[index++]; - if (last >= 0 && byte <= last) { - return 0; - } - last = byte; - static_assert(kDegree == 256, - "kDegree must be 256 for this write to be in bounds"); - h->v[i].c[byte] = 1; - } - } - for (; index < omega(); index++) { - if (in[index] != 0) { - return 0; - } - } - return 1; -} - -template -struct public_key { - uint8_t rho[kRhoBytes]; - vector t1; - // Pre-cached value(s). 
- uint8_t public_key_hash[kTrBytes]; -}; - -template -struct private_key { - uint8_t rho[kRhoBytes]; - uint8_t k[kKBytes]; - uint8_t public_key_hash[kTrBytes]; - vector s1; - vector s2; - vector t0; -}; - -template -struct signature { - uint8_t c_tilde[2 * lambda_bytes()]; - vector z; - vector h; -}; - -// FIPS 204, Algorithm 22 (`pkEncode`). -template -static int mldsa_marshal_public_key(CBB *out, const struct public_key *pub) { - if (!CBB_add_bytes(out, pub->rho, sizeof(pub->rho))) { - return 0; - } - - uint8_t *vectork_output; - if (!CBB_add_space(out, &vectork_output, 320 * K)) { - return 0; - } - vector_encode(vectork_output, &pub->t1, 10); - - return 1; -} - -// FIPS 204, Algorithm 23 (`pkDecode`). -template -static int mldsa_parse_public_key(struct public_key *pub, CBS *in) { - const CBS orig_in = *in; - - if (!CBS_copy_bytes(in, pub->rho, sizeof(pub->rho))) { - return 0; - } - - CBS t1_bytes; - if (!CBS_get_bytes(in, &t1_bytes, 320 * K) || CBS_len(in) != 0) { - return 0; - } - vector_decode_10(&pub->t1, CBS_data(&t1_bytes)); - - // Compute pre-cached values. - BORINGSSL_keccak(pub->public_key_hash, sizeof(pub->public_key_hash), - CBS_data(&orig_in), CBS_len(&orig_in), boringssl_shake256); - - return 1; -} - -// FIPS 204, Algorithm 24 (`skEncode`). 
-template -static int mldsa_marshal_private_key(CBB *out, - const struct private_key *priv) { - if (!CBB_add_bytes(out, priv->rho, sizeof(priv->rho)) || - !CBB_add_bytes(out, priv->k, sizeof(priv->k)) || - !CBB_add_bytes(out, priv->public_key_hash, - sizeof(priv->public_key_hash))) { - return 0; - } - - constexpr size_t scalar_bytes = - (kDegree * plus_minus_eta_bitlen() + 7) / 8; - uint8_t *vectorl_output; - if (!CBB_add_space(out, &vectorl_output, scalar_bytes * L)) { - return 0; - } - vector_encode_signed(vectorl_output, &priv->s1, plus_minus_eta_bitlen(), - eta()); - - uint8_t *s2_output; - if (!CBB_add_space(out, &s2_output, scalar_bytes * K)) { - return 0; - } - vector_encode_signed(s2_output, &priv->s2, plus_minus_eta_bitlen(), - eta()); - - uint8_t *t0_output; - if (!CBB_add_space(out, &t0_output, 416 * K)) { - return 0; - } - vector_encode_signed(t0_output, &priv->t0, 13, 1 << 12); - - return 1; -} - -// FIPS 204, Algorithm 25 (`skDecode`). -template -static int mldsa_parse_private_key(struct private_key *priv, CBS *in) { - CBS s1_bytes; - CBS s2_bytes; - CBS t0_bytes; - constexpr size_t scalar_bytes = - (kDegree * plus_minus_eta_bitlen() + 7) / 8; - if (!CBS_copy_bytes(in, priv->rho, sizeof(priv->rho)) || - !CBS_copy_bytes(in, priv->k, sizeof(priv->k)) || - !CBS_copy_bytes(in, priv->public_key_hash, - sizeof(priv->public_key_hash)) || - !CBS_get_bytes(in, &s1_bytes, scalar_bytes * L) || - !vector_decode_signed(&priv->s1, CBS_data(&s1_bytes), - plus_minus_eta_bitlen(), eta()) || - !CBS_get_bytes(in, &s2_bytes, scalar_bytes * K) || - !vector_decode_signed(&priv->s2, CBS_data(&s2_bytes), - plus_minus_eta_bitlen(), eta()) || - !CBS_get_bytes(in, &t0_bytes, 416 * K) || - // Note: Decoding 13 bits into (-2^12, 2^12] cannot fail. - !vector_decode_signed(&priv->t0, CBS_data(&t0_bytes), 13, 1 << 12)) { - return 0; - } - - return 1; -} - -// FIPS 204, Algorithm 26 (`sigEncode`). 
-template -static int mldsa_marshal_signature(CBB *out, - const struct signature *sign) { - if (!CBB_add_bytes(out, sign->c_tilde, sizeof(sign->c_tilde))) { - return 0; - } - - uint8_t *vectorl_output; - if (!CBB_add_space(out, &vectorl_output, 640 * L)) { - return 0; - } - vector_encode_signed(vectorl_output, &sign->z, 20, 1 << 19); - - uint8_t *hint_output; - if (!CBB_add_space(out, &hint_output, omega() + K)) { - return 0; - } - hint_bit_pack(hint_output, &sign->h); - - return 1; -} - -// FIPS 204, Algorithm 27 (`sigDecode`). -template -static int mldsa_parse_signature(struct signature *sign, CBS *in) { - CBS z_bytes; - CBS hint_bytes; - if (!CBS_copy_bytes(in, sign->c_tilde, sizeof(sign->c_tilde)) || - !CBS_get_bytes(in, &z_bytes, 640 * L) || - // Note: Decoding 20 bits into (-2^19, 2^19] cannot fail. - !vector_decode_signed(&sign->z, CBS_data(&z_bytes), 20, 1 << 19) || - !CBS_get_bytes(in, &hint_bytes, omega() + K) || - !hint_bit_unpack(&sign->h, CBS_data(&hint_bytes))) { - return 0; - }; - - return 1; -} - -template -struct DeleterFree { - void operator()(T *ptr) { OPENSSL_free(ptr); } -}; - -// FIPS 204, Algorithm 6 (`ML-DSA.KeyGen_internal`). Returns 1 on success and 0 -// on failure. -template -static int mldsa_generate_key_external_entropy( - uint8_t out_encoded_public_key[public_key_bytes()], - struct private_key *priv, const uint8_t entropy[MLDSA_SEED_BYTES]) { - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. - struct values_st { - struct public_key pub; - matrix a_ntt; - vector s1_ntt; - vector t; - }; - std::unique_ptr> values( - reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); - if (values == NULL) { - return 0; - } - - uint8_t augmented_entropy[MLDSA_SEED_BYTES + 2]; - OPENSSL_memcpy(augmented_entropy, entropy, MLDSA_SEED_BYTES); - // The k and l parameters are appended to the seed. 
- augmented_entropy[MLDSA_SEED_BYTES] = K; - augmented_entropy[MLDSA_SEED_BYTES + 1] = L; - uint8_t expanded_seed[kRhoBytes + kSigmaBytes + kKBytes]; - BORINGSSL_keccak(expanded_seed, sizeof(expanded_seed), augmented_entropy, - sizeof(augmented_entropy), boringssl_shake256); - const uint8_t *const rho = expanded_seed; - const uint8_t *const sigma = expanded_seed + kRhoBytes; - const uint8_t *const k = expanded_seed + kRhoBytes + kSigmaBytes; - // rho is public. - CONSTTIME_DECLASSIFY(rho, kRhoBytes); - OPENSSL_memcpy(values->pub.rho, rho, sizeof(values->pub.rho)); - OPENSSL_memcpy(priv->rho, rho, sizeof(priv->rho)); - OPENSSL_memcpy(priv->k, k, sizeof(priv->k)); - - matrix_expand(&values->a_ntt, rho); - vector_expand_short(&priv->s1, &priv->s2, sigma); - - OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); - vector_ntt(&values->s1_ntt); - - matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt); - vector_inverse_ntt(&values->t); - vector_add(&values->t, &values->t, &priv->s2); - - vector_power2_round(&values->pub.t1, &priv->t0, &values->t); - // t1 is public. - CONSTTIME_DECLASSIFY(&values->pub.t1, sizeof(values->pub.t1)); - - CBB cbb; - CBB_init_fixed(&cbb, out_encoded_public_key, public_key_bytes()); - if (!mldsa_marshal_public_key(&cbb, &values->pub)) { - return 0; - } - assert(CBB_len(&cbb) == public_key_bytes()); - - BORINGSSL_keccak(priv->public_key_hash, sizeof(priv->public_key_hash), - out_encoded_public_key, public_key_bytes(), - boringssl_shake256); - - return 1; -} - -template -static int mldsa_public_from_private(struct public_key *pub, - const struct private_key *priv) { - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. 
- struct values_st { - matrix a_ntt; - vector s1_ntt; - vector t; - vector t0; - }; - std::unique_ptr> values( - reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); - if (values == NULL) { - return 0; - } - - - OPENSSL_memcpy(pub->rho, priv->rho, sizeof(pub->rho)); - OPENSSL_memcpy(pub->public_key_hash, priv->public_key_hash, - sizeof(pub->public_key_hash)); - - matrix_expand(&values->a_ntt, priv->rho); - - OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); - vector_ntt(&values->s1_ntt); - - matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt); - vector_inverse_ntt(&values->t); - vector_add(&values->t, &values->t, &priv->s2); - - vector_power2_round(&pub->t1, &values->t0, &values->t); - return 1; -} - -// FIPS 204, Algorithm 7 (`ML-DSA.Sign_internal`). Returns 1 on success and 0 -// on failure. -template -static int mldsa_sign_internal( - uint8_t out_encoded_signature[signature_bytes()], - const struct private_key *priv, const uint8_t *msg, size_t msg_len, - const uint8_t *context_prefix, size_t context_prefix_len, - const uint8_t *context, size_t context_len, - const uint8_t randomizer[MLDSA_SIGNATURE_RANDOMIZER_BYTES]) { - uint8_t mu[kMuBytes]; - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, priv->public_key_hash, - sizeof(priv->public_key_hash)); - BORINGSSL_keccak_absorb(&keccak_ctx, context_prefix, context_prefix_len); - BORINGSSL_keccak_absorb(&keccak_ctx, context, context_len); - BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len); - BORINGSSL_keccak_squeeze(&keccak_ctx, mu, kMuBytes); - - uint8_t rho_prime[kRhoPrimeBytes]; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, priv->k, sizeof(priv->k)); - BORINGSSL_keccak_absorb(&keccak_ctx, randomizer, - MLDSA_SIGNATURE_RANDOMIZER_BYTES); - BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes); - BORINGSSL_keccak_squeeze(&keccak_ctx, rho_prime, 
kRhoPrimeBytes); - - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. - struct values_st { - struct signature sign; - vector s1_ntt; - vector s2_ntt; - vector t0_ntt; - matrix a_ntt; - vector y; - vector w; - vector w1; - vector cs1; - vector cs2; - }; - std::unique_ptr> values( - reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); - if (values == NULL) { - return 0; - } - OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt)); - vector_ntt(&values->s1_ntt); - - OPENSSL_memcpy(&values->s2_ntt, &priv->s2, sizeof(values->s2_ntt)); - vector_ntt(&values->s2_ntt); - - OPENSSL_memcpy(&values->t0_ntt, &priv->t0, sizeof(values->t0_ntt)); - vector_ntt(&values->t0_ntt); - - matrix_expand(&values->a_ntt, priv->rho); - - // kappa must not exceed 2**16/L = 13107. But the probability of it - // exceeding even 1000 iterations is vanishingly small. - for (size_t kappa = 0;; kappa += L) { - vector_expand_mask(&values->y, rho_prime, kappa); - - vector *y_ntt = &values->cs1; - OPENSSL_memcpy(y_ntt, &values->y, sizeof(*y_ntt)); - vector_ntt(y_ntt); - - matrix_mult(&values->w, &values->a_ntt, y_ntt); - vector_inverse_ntt(&values->w); - - vector_high_bits(&values->w1, &values->w); - uint8_t w1_encoded[128 * K]; - w1_encode(w1_encoded, &values->w1); - - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes); - BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K); - BORINGSSL_keccak_squeeze(&keccak_ctx, values->sign.c_tilde, - 2 * lambda_bytes()); - - scalar c_ntt; - scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde, - sizeof(values->sign.c_tilde), tau()); - scalar_ntt(&c_ntt); - - vector_mult_scalar(&values->cs1, &values->s1_ntt, &c_ntt); - vector_inverse_ntt(&values->cs1); - vector_mult_scalar(&values->cs2, &values->s2_ntt, &c_ntt); - vector_inverse_ntt(&values->cs2); - - vector_add(&values->sign.z, &values->y, &values->cs1); - - vector *r0 = 
&values->w1; - vector_sub(r0, &values->w, &values->cs2); - vector_low_bits(r0, r0); - - // Leaking the fact that a signature was rejected is fine as the next - // attempt at a signature will be (indistinguishable from) independent of - // this one. Note, however, that we additionally leak which of the two - // branches rejected the signature. Section 5.5 of - // https://pq-crystals.org/dilithium/data/dilithium-specification-round3.pdf - // describes this leak as OK. Note we leak less than what is described by - // the paper; we do not reveal which coefficient violated the bound, and - // we hide which of the |z_max| or |r0_max| bound failed. See also - // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/2bbab0fa_d241d35a/ - uint32_t z_max = vector_max(&values->sign.z); - uint32_t r0_max = vector_max_signed(r0); - if (constant_time_declassify_w( - constant_time_ge_w(z_max, gamma1() - beta()) | - constant_time_ge_w(r0_max, kGamma2 - beta()))) { - continue; - } - - vector *ct0 = &values->w1; - vector_mult_scalar(ct0, &values->t0_ntt, &c_ntt); - vector_inverse_ntt(ct0); - vector_make_hint(&values->sign.h, ct0, &values->cs2, &values->w); - - // See above. - uint32_t ct0_max = vector_max(ct0); - size_t h_ones = vector_count_ones(&values->sign.h); - if (constant_time_declassify_w(constant_time_ge_w(ct0_max, kGamma2) | - constant_time_lt_w(omega(), h_ones))) { - continue; - } - - // Although computed with the private key, the signature is public. - CONSTTIME_DECLASSIFY(values->sign.c_tilde, sizeof(values->sign.c_tilde)); - CONSTTIME_DECLASSIFY(&values->sign.z, sizeof(values->sign.z)); - CONSTTIME_DECLASSIFY(&values->sign.h, sizeof(values->sign.h)); - - CBB cbb; - CBB_init_fixed(&cbb, out_encoded_signature, signature_bytes()); - if (!mldsa_marshal_signature(&cbb, &values->sign)) { - return 0; - } - - BSSL_CHECK(CBB_len(&cbb) == signature_bytes()); - return 1; - } -} - -// FIPS 204, Algorithm 8 (`ML-DSA.Verify_internal`). 
-template -static int mldsa_verify_internal( - const struct public_key *pub, - const uint8_t encoded_signature[signature_bytes()], const uint8_t *msg, - size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, - const uint8_t *context, size_t context_len) { - // Intermediate values, allocated on the heap to allow use when there is a - // limited amount of stack. - struct values_st { - struct signature sign; - matrix a_ntt; - vector z_ntt; - vector az_ntt; - vector ct1_ntt; - }; - std::unique_ptr> values( - reinterpret_cast(OPENSSL_malloc(sizeof(values_st)))); - if (values == NULL) { - return 0; - } - - CBS cbs; - CBS_init(&cbs, encoded_signature, signature_bytes()); - if (!mldsa_parse_signature(&values->sign, &cbs)) { - return 0; - } - - matrix_expand(&values->a_ntt, pub->rho); - - uint8_t mu[kMuBytes]; - struct BORINGSSL_keccak_st keccak_ctx; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, pub->public_key_hash, - sizeof(pub->public_key_hash)); - BORINGSSL_keccak_absorb(&keccak_ctx, context_prefix, context_prefix_len); - BORINGSSL_keccak_absorb(&keccak_ctx, context, context_len); - BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len); - BORINGSSL_keccak_squeeze(&keccak_ctx, mu, kMuBytes); - - scalar c_ntt; - scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde, - sizeof(values->sign.c_tilde), tau()); - scalar_ntt(&c_ntt); - - OPENSSL_memcpy(&values->z_ntt, &values->sign.z, sizeof(values->z_ntt)); - vector_ntt(&values->z_ntt); - - matrix_mult(&values->az_ntt, &values->a_ntt, &values->z_ntt); - - vector_scale_power2_round(&values->ct1_ntt, &pub->t1); - vector_ntt(&values->ct1_ntt); - - vector_mult_scalar(&values->ct1_ntt, &values->ct1_ntt, &c_ntt); - - vector *const w1 = &values->az_ntt; - vector_sub(w1, &values->az_ntt, &values->ct1_ntt); - vector_inverse_ntt(w1); - - vector_use_hint_vartime(w1, &values->sign.h, w1); - uint8_t w1_encoded[128 * K]; - w1_encode(w1_encoded, w1); - - uint8_t c_tilde[2 
* lambda_bytes()]; - BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256); - BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes); - BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K); - BORINGSSL_keccak_squeeze(&keccak_ctx, c_tilde, 2 * lambda_bytes()); - - uint32_t z_max = vector_max(&values->sign.z); - return z_max < static_cast(gamma1() - beta()) && - OPENSSL_memcmp(c_tilde, values->sign.c_tilde, 2 * lambda_bytes()) == - 0; -} - -} // namespace - -// ML-DSA-65 specific wrappers. - -static struct private_key<6, 5> *mldsa65_private_key_from_external( - const struct MLDSA65_private_key *external) { - static_assert(sizeof(struct MLDSA65_private_key) == - sizeof(struct private_key<6, 5>), - "MLDSA65 private key size incorrect"); - static_assert(alignof(struct MLDSA65_private_key) == - alignof(struct private_key<6, 5>), - "MLDSA65 private key align incorrect"); - return (struct private_key<6, 5> *)external; -} - -static struct public_key<6> * -mldsa65_public_key_from_external(const struct MLDSA65_public_key *external) { - static_assert(sizeof(struct MLDSA65_public_key) == - sizeof(struct public_key<6>), - "MLDSA65 public key size incorrect"); - static_assert(alignof(struct MLDSA65_public_key) == - alignof(struct public_key<6>), - "MLDSA65 public key align incorrect"); - return (struct public_key<6> *)external; -} - -int MLDSA65_parse_public_key(struct MLDSA65_public_key *public_key, CBS *in) { - return mldsa_parse_public_key(mldsa65_public_key_from_external(public_key), - in); -} - -int MLDSA65_marshal_private_key(CBB *out, - const struct MLDSA65_private_key *private_key) { - return mldsa_marshal_private_key( - out, mldsa65_private_key_from_external(private_key)); -} - -int MLDSA65_parse_private_key(struct MLDSA65_private_key *private_key, - CBS *in) { - return mldsa_parse_private_key(mldsa65_private_key_from_external(private_key), - in) && - CBS_len(in) == 0; -} - -// Calls |MLDSA_generate_key_external_entropy| with random bytes from -// |RAND_bytes|. 
Returns 1 on success and 0 on failure. int MLDSA65_generate_key( uint8_t out_encoded_public_key[MLDSA65_PUBLIC_KEY_BYTES], uint8_t out_seed[MLDSA_SEED_BYTES], struct MLDSA65_private_key *out_private_key) { - RAND_bytes(out_seed, MLDSA_SEED_BYTES); - return MLDSA65_generate_key_external_entropy(out_encoded_public_key, - out_private_key, out_seed); + return bcm_success(BCM_mldsa65_generate_key( + out_encoded_public_key, out_seed, + reinterpret_cast(out_private_key))); } int MLDSA65_private_key_from_seed(struct MLDSA65_private_key *out_private_key, const uint8_t *seed, size_t seed_len) { - if (seed_len != MLDSA_SEED_BYTES) { + if (seed_len != BCM_MLDSA_SEED_BYTES) { return 0; } - uint8_t public_key[MLDSA65_PUBLIC_KEY_BYTES]; - return MLDSA65_generate_key_external_entropy(public_key, out_private_key, - seed); -} - -int MLDSA65_generate_key_external_entropy( - uint8_t out_encoded_public_key[MLDSA65_PUBLIC_KEY_BYTES], - struct MLDSA65_private_key *out_private_key, - const uint8_t entropy[MLDSA_SEED_BYTES]) { - return mldsa_generate_key_external_entropy( - out_encoded_public_key, - mldsa65_private_key_from_external(out_private_key), entropy); + return bcm_success(BCM_mldsa65_private_key_from_seed( + reinterpret_cast(out_private_key), seed)); } int MLDSA65_public_from_private(struct MLDSA65_public_key *out_public_key, const struct MLDSA65_private_key *private_key) { - return mldsa_public_from_private( - mldsa65_public_key_from_external(out_public_key), - mldsa65_private_key_from_external(private_key)); + return bcm_success(BCM_mldsa65_public_from_private( + reinterpret_cast(out_public_key), + reinterpret_cast(private_key))); } -int MLDSA65_sign_internal( - uint8_t out_encoded_signature[MLDSA65_SIGNATURE_BYTES], - const struct MLDSA65_private_key *private_key, const uint8_t *msg, - size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, - const uint8_t *context, size_t context_len, - const uint8_t randomizer[MLDSA_SIGNATURE_RANDOMIZER_BYTES]) { - return 
mldsa_sign_internal(out_encoded_signature, - mldsa65_private_key_from_external(private_key), - msg, msg_len, context_prefix, context_prefix_len, - context, context_len, randomizer); -} - -// ML-DSA signature in randomized mode, filling the random bytes with -// |RAND_bytes|. Returns 1 on success and 0 on failure. int MLDSA65_sign(uint8_t out_encoded_signature[MLDSA65_SIGNATURE_BYTES], const struct MLDSA65_private_key *private_key, const uint8_t *msg, size_t msg_len, const uint8_t *context, @@ -1748,43 +60,37 @@ int MLDSA65_sign(uint8_t out_encoded_signature[MLDSA65_SIGNATURE_BYTES], if (context_len > 255) { return 0; } - - uint8_t randomizer[MLDSA_SIGNATURE_RANDOMIZER_BYTES]; - RAND_bytes(randomizer, sizeof(randomizer)); - - const uint8_t context_prefix[2] = {0, static_cast(context_len)}; - return MLDSA65_sign_internal(out_encoded_signature, private_key, msg, msg_len, - context_prefix, sizeof(context_prefix), context, - context_len, randomizer); + return bcm_success(BCM_mldsa65_sign( + out_encoded_signature, + reinterpret_cast(private_key), msg, + msg_len, context, context_len)); } -// FIPS 204, Algorithm 3 (`ML-DSA.Verify`). 
int MLDSA65_verify(const struct MLDSA65_public_key *public_key, const uint8_t *signature, size_t signature_len, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len) { - if (context_len > 255 || signature_len != MLDSA65_SIGNATURE_BYTES) { + if (context_len > 255 || signature_len != BCM_MLDSA65_SIGNATURE_BYTES) { return 0; } + return bcm_success(BCM_mldsa65_verify( + reinterpret_cast(public_key), signature, + msg, msg_len, context, context_len)); +} - const uint8_t context_prefix[2] = {0, static_cast(context_len)}; - return MLDSA65_verify_internal(public_key, signature, msg, msg_len, - context_prefix, sizeof(context_prefix), - context, context_len); +int MLDSA65_marshal_public_key(CBB *out, + const struct MLDSA65_public_key *public_key) { + return bcm_success(BCM_mldsa65_marshal_public_key( + out, reinterpret_cast(public_key))); } -int MLDSA65_verify_internal( - const struct MLDSA65_public_key *public_key, - const uint8_t encoded_signature[MLDSA65_SIGNATURE_BYTES], - const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, - size_t context_prefix_len, const uint8_t *context, size_t context_len) { - return mldsa_verify_internal<6, 5>( - mldsa65_public_key_from_external(public_key), encoded_signature, msg, - msg_len, context_prefix, context_prefix_len, context, context_len); +int MLDSA65_parse_public_key(struct MLDSA65_public_key *public_key, CBS *in) { + return bcm_success(BCM_mldsa65_parse_public_key( + reinterpret_cast(public_key), in)); } -int MLDSA65_marshal_public_key(CBB *out, - const struct MLDSA65_public_key *public_key) { - return mldsa_marshal_public_key(out, - mldsa65_public_key_from_external(public_key)); +int MLDSA65_parse_private_key(struct MLDSA65_private_key *private_key, + CBS *in) { + return bcm_success(BCM_mldsa65_parse_private_key( + reinterpret_cast(private_key), in)); } diff --git a/Sources/CCryptoBoringSSL/crypto/mlkem/internal.h b/Sources/CCryptoBoringSSL/crypto/mlkem/internal.h index c3018798..dd177524 
100644 --- a/Sources/CCryptoBoringSSL/crypto/mlkem/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/mlkem/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/mlkem/mlkem.cc b/Sources/CCryptoBoringSSL/crypto/mlkem/mlkem.cc index 1752dcab..c29fe03e 100644 --- a/Sources/CCryptoBoringSSL/crypto/mlkem/mlkem.cc +++ b/Sources/CCryptoBoringSSL/crypto/mlkem/mlkem.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,8 +24,8 @@ #include #include +#include "../fipsmodule/keccak/internal.h" #include "../internal.h" -#include "../keccak/internal.h" #include "./internal.h" @@ -75,11 +75,11 @@ static const int kDV768 = 4; static const int kDU1024 = 11; static const int kDV1024 = 5; -constexpr size_t encoded_vector_size(int rank) { +static constexpr size_t encoded_vector_size(int rank) { return (kLog2Prime * DEGREE / 8) * static_cast(rank); } -constexpr size_t encoded_public_key_size(int rank) { +static constexpr size_t encoded_public_key_size(int rank) { return encoded_vector_size(rank) + /*sizeof(rho)=*/32; } @@ -88,13 +88,13 @@ static_assert(encoded_public_key_size(RANK768) == MLKEM768_PUBLIC_KEY_BYTES, static_assert(encoded_public_key_size(RANK1024) == MLKEM1024_PUBLIC_KEY_BYTES, ""); -constexpr size_t compressed_vector_size(int rank) { +static constexpr size_t compressed_vector_size(int rank) { // `if constexpr` isn't available in C++17. return (rank == RANK768 ? 
kDU768 : kDU1024) * static_cast(rank) * DEGREE / 8; } -constexpr size_t ciphertext_size(int rank) { +static constexpr size_t ciphertext_size(int rank) { return compressed_vector_size(rank) + (rank == RANK768 ? kDV768 : kDV1024) * DEGREE / 8; } @@ -743,9 +743,9 @@ static int mlkem_marshal_public_key(CBB *out, } template -void mlkem_generate_key_external_seed(uint8_t *out_encoded_public_key, - private_key *priv, - const uint8_t seed[MLKEM_SEED_BYTES]) { +static void mlkem_generate_key_external_seed( + uint8_t *out_encoded_public_key, private_key *priv, + const uint8_t seed[MLKEM_SEED_BYTES]) { uint8_t augmented_seed[33]; OPENSSL_memcpy(augmented_seed, seed, 32); augmented_seed[32] = RANK; diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs7/internal.h b/Sources/CCryptoBoringSSL/crypto/pkcs7/internal.h index d7ac8f0c..de1cd4bf 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs7/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/pkcs7/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7.cc b/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7.cc index 43a92ff5..757ba110 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7.cc +++ b/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7_x509.cc b/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7_x509.cc index a5d942aa..554ea38c 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7_x509.cc +++ b/Sources/CCryptoBoringSSL/crypto/pkcs7/pkcs7_x509.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs8/internal.h b/Sources/CCryptoBoringSSL/crypto/pkcs8/internal.h index 20b62e4f..5bdde14b 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs8/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/pkcs8/internal.h @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs8/p5_pbev2.cc b/Sources/CCryptoBoringSSL/crypto/pkcs8/p5_pbev2.cc index 9a505e19..0e5d9aef 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs8/p5_pbev2.cc +++ b/Sources/CCryptoBoringSSL/crypto/pkcs8/p5_pbev2.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999-2004. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999-2004. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. 
diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8.cc b/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8.cc index ab39d17e..5aeed326 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8.cc +++ b/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8_x509.cc b/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8_x509.cc index f1f4634e..1ec991b8 100644 --- a/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8_x509.cc +++ b/Sources/CCryptoBoringSSL/crypto/pkcs8/pkcs8_x509.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/poly1305/internal.h b/Sources/CCryptoBoringSSL/crypto/poly1305/internal.h index f631c7ea..4615e3d2 100644 --- a/Sources/CCryptoBoringSSL/crypto/poly1305/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/poly1305/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. 
+/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305.cc b/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305.cc index 49161789..da83abfb 100644 --- a/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305.cc +++ b/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_arm.cc b/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_arm.cc index b8c9380e..c04c0a13 100644 --- a/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_arm.cc +++ b/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_arm.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_vec.cc b/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_vec.cc index 0a535bb1..e85d1469 100644 --- a/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_vec.cc +++ b/Sources/CCryptoBoringSSL/crypto/poly1305/poly1305_vec.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/pool/internal.h b/Sources/CCryptoBoringSSL/crypto/pool/internal.h index 654d300b..1b1d71b7 100644 --- a/Sources/CCryptoBoringSSL/crypto/pool/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/pool/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/pool/pool.cc b/Sources/CCryptoBoringSSL/crypto/pool/pool.cc index 6761837e..80f6b412 100644 --- a/Sources/CCryptoBoringSSL/crypto/pool/pool.cc +++ b/Sources/CCryptoBoringSSL/crypto/pool/pool.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/deterministic.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/deterministic.cc index 3204cca1..396803c0 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/deterministic.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/deterministic.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. 
+/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/fork_detect.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/fork_detect.cc index af1e6077..537f8285 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/fork_detect.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/fork_detect.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/forkunsafe.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/forkunsafe.cc index ad69a428..8a26bb63 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/forkunsafe.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/forkunsafe.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/getentropy.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/getentropy.cc index 02ddc4d8..7d1a33dd 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/getentropy.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/getentropy.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. 
+/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/getrandom_fillin.h b/Sources/CCryptoBoringSSL/crypto/rand_extra/getrandom_fillin.h index 3d2a0c57..3e84ba43 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/getrandom_fillin.h +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/getrandom_fillin.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/ios.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/ios.cc index 33c4bf92..c5546f5a 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/ios.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/ios.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/passive.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/passive.cc index 65d01079..7349998c 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/passive.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/passive.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. 
+/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/rand_extra.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/rand_extra.cc index a0b9f10b..9812a7a0 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/rand_extra.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/rand_extra.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/sysrand_internal.h b/Sources/CCryptoBoringSSL/crypto/rand_extra/sysrand_internal.h index 94e99ef1..95e0d004 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/sysrand_internal.h +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/sysrand_internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/trusty.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/trusty.cc index 57e4148a..455a4c77 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/trusty.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/trusty.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. 
+/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/urandom.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/urandom.cc index 75863de6..fed9418b 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/urandom.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/urandom.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rand_extra/windows.cc b/Sources/CCryptoBoringSSL/crypto/rand_extra/windows.cc index 89351610..97ce4b60 100644 --- a/Sources/CCryptoBoringSSL/crypto/rand_extra/windows.cc +++ b/Sources/CCryptoBoringSSL/crypto/rand_extra/windows.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/refcount.cc b/Sources/CCryptoBoringSSL/crypto/refcount.cc index d2a886d0..9cfa2843 100644 --- a/Sources/CCryptoBoringSSL/crypto/refcount.cc +++ b/Sources/CCryptoBoringSSL/crypto/refcount.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. 
+/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_asn1.cc b/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_asn1.cc index 58988144..73544b3d 100644 --- a/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_asn1.cc +++ b/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_asn1.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2000. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2000. */ /* ==================================================================== * Copyright (c) 2000-2005 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_extra.cc b/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_extra.cc index 1558c7ea..979db094 100644 --- a/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_extra.cc +++ b/Sources/CCryptoBoringSSL/crypto/rsa_extra/rsa_extra.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/sha/sha1.cc b/Sources/CCryptoBoringSSL/crypto/sha/sha1.cc index f22b0683..6fdf9fbe 100644 --- a/Sources/CCryptoBoringSSL/crypto/sha/sha1.cc +++ b/Sources/CCryptoBoringSSL/crypto/sha/sha1.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. 
+/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/sha/sha256.cc b/Sources/CCryptoBoringSSL/crypto/sha/sha256.cc index 90838110..a3df1585 100644 --- a/Sources/CCryptoBoringSSL/crypto/sha/sha256.cc +++ b/Sources/CCryptoBoringSSL/crypto/sha/sha256.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/sha/sha512.cc b/Sources/CCryptoBoringSSL/crypto/sha/sha512.cc index a80aa47e..729ec6d9 100644 --- a/Sources/CCryptoBoringSSL/crypto/sha/sha512.cc +++ b/Sources/CCryptoBoringSSL/crypto/sha/sha512.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/siphash/siphash.cc b/Sources/CCryptoBoringSSL/crypto/siphash/siphash.cc index e820f4e0..14162fff 100644 --- a/Sources/CCryptoBoringSSL/crypto/siphash/siphash.cc +++ b/Sources/CCryptoBoringSSL/crypto/siphash/siphash.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. 
+/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/address.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/address.h index 7d21e8be..9c1e4ce6 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/address.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/address.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.cc b/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.cc index b8eed896..8577b21f 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.cc +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.h index 194a126a..a44269d7 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/fors.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/internal.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/internal.h index 6412853c..33b4dbd2 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The 
BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.cc b/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.cc index 9df00e55..2d34d1c8 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.cc +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.h index b23c2e63..522729b8 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/merkle.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/params.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/params.h index d0812962..f5ff7325 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/params.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/params.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/slhdsa.cc b/Sources/CCryptoBoringSSL/crypto/slhdsa/slhdsa.cc index c9347724..78b7f53e 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/slhdsa.cc +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/slhdsa.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * 
* Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.cc b/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.cc index 88c33392..20e87a30 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.cc +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.h index 19d9cd39..9db174de 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/thash.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.cc b/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.cc index e9ca1185..934527a8 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.cc +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.h b/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.h index 8b3ca2de..b987eaf0 100644 --- a/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.h +++ b/Sources/CCryptoBoringSSL/crypto/slhdsa/wots.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or 
distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx.cc deleted file mode 100644 index 0f5a5652..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx.cc +++ /dev/null @@ -1,140 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#define OPENSSL_UNSTABLE_EXPERIMENTAL_SPX -#include -#include - -#include "./spx_address.h" -#include "./spx_fors.h" -#include "./spx_merkle.h" -#include "./spx_params.h" -#include "./spx_util.h" -#include "./spx_thash.h" - -void SPX_generate_key(uint8_t out_public_key[SPX_PUBLIC_KEY_BYTES], - uint8_t out_secret_key[SPX_SECRET_KEY_BYTES]) { - uint8_t seed[3 * SPX_N]; - RAND_bytes(seed, 3 * SPX_N); - SPX_generate_key_from_seed(out_public_key, out_secret_key, seed); -} - -void SPX_generate_key_from_seed(uint8_t out_public_key[SPX_PUBLIC_KEY_BYTES], - uint8_t out_secret_key[SPX_SECRET_KEY_BYTES], - const uint8_t seed[3 * SPX_N]) { - // Initialize SK.seed || SK.prf || PK.seed from seed. - memcpy(out_secret_key, seed, 3 * SPX_N); - - // Initialize PK.seed from seed. 
- memcpy(out_public_key, seed + 2 * SPX_N, SPX_N); - - uint8_t addr[32] = {0}; - spx_set_layer_addr(addr, SPX_D - 1); - - // Set PK.root - spx_treehash(out_public_key + SPX_N, out_secret_key, 0, SPX_TREE_HEIGHT, - out_public_key, addr); - memcpy(out_secret_key + 3 * SPX_N, out_public_key + SPX_N, SPX_N); -} - -void SPX_sign(uint8_t out_signature[SPX_SIGNATURE_BYTES], - const uint8_t secret_key[SPX_SECRET_KEY_BYTES], - const uint8_t *msg, size_t msg_len, int randomized) { - uint8_t addr[32] = {0}; - const uint8_t *sk_seed = secret_key; - const uint8_t *sk_prf = secret_key + SPX_N; - const uint8_t *pk_seed = secret_key + 2 * SPX_N; - const uint8_t *pk_root = secret_key + 3 * SPX_N; - - uint8_t opt_rand[SPX_N] = {0}; - - if (randomized) { - RAND_bytes(opt_rand, SPX_N); - } else { - memcpy(opt_rand, pk_seed, SPX_N); - } - - // Derive randomizer r and copy it to signature. - uint8_t r[SPX_N]; - spx_thash_prfmsg(r, sk_prf, opt_rand, msg, msg_len); - memcpy(out_signature, r, SPX_N); - - uint8_t digest[SPX_DIGEST_SIZE]; - spx_thash_hmsg(digest, r, pk_seed, pk_root, msg, msg_len); - - uint8_t fors_digest[SPX_FORS_MSG_BYTES]; - memcpy(fors_digest, digest, SPX_FORS_MSG_BYTES); - - uint8_t *tmp_idx_tree = digest + SPX_FORS_MSG_BYTES; - uint8_t *tmp_idx_leaf = tmp_idx_tree + SPX_TREE_BYTES; - - uint64_t idx_tree = spx_to_uint64(tmp_idx_tree, SPX_TREE_BYTES); - idx_tree &= (~(uint64_t)0) >> (64 - SPX_TREE_BITS); - - uint32_t idx_leaf = (uint32_t)spx_to_uint64(tmp_idx_leaf, SPX_LEAF_BYTES); - idx_leaf &= (~(uint32_t)0) >> (32 - SPX_LEAF_BITS); - - spx_set_tree_addr(addr, idx_tree); - spx_set_type(addr, SPX_ADDR_TYPE_FORSTREE); - spx_set_keypair_addr(addr, idx_leaf); - - spx_fors_sign(out_signature + SPX_N, fors_digest, sk_seed, pk_seed, addr); - - uint8_t pk_fors[SPX_N]; - spx_fors_pk_from_sig(pk_fors, out_signature + SPX_N, fors_digest, pk_seed, - addr); - - spx_ht_sign(out_signature + SPX_N + SPX_FORS_BYTES, pk_fors, idx_tree, - idx_leaf, sk_seed, pk_seed); -} - -int 
SPX_verify(const uint8_t signature[SPX_SIGNATURE_BYTES], - const uint8_t public_key[SPX_SECRET_KEY_BYTES], - const uint8_t *msg, size_t msg_len) { - uint8_t addr[32] = {0}; - const uint8_t *pk_seed = public_key; - const uint8_t *pk_root = public_key + SPX_N; - - const uint8_t *r = signature; - const uint8_t *sig_fors = signature + SPX_N; - const uint8_t *sig_ht = sig_fors + SPX_FORS_BYTES; - - uint8_t digest[SPX_DIGEST_SIZE]; - spx_thash_hmsg(digest, r, pk_seed, pk_root, msg, msg_len); - - uint8_t fors_digest[SPX_FORS_MSG_BYTES]; - memcpy(fors_digest, digest, SPX_FORS_MSG_BYTES); - - uint8_t *tmp_idx_tree = digest + SPX_FORS_MSG_BYTES; - uint8_t *tmp_idx_leaf = tmp_idx_tree + SPX_TREE_BYTES; - - uint64_t idx_tree = spx_to_uint64(tmp_idx_tree, SPX_TREE_BYTES); - idx_tree &= (~(uint64_t)0) >> (64 - SPX_TREE_BITS); - - uint32_t idx_leaf = (uint32_t)spx_to_uint64(tmp_idx_leaf, SPX_LEAF_BYTES); - idx_leaf &= (~(uint32_t)0) >> (32 - SPX_LEAF_BITS); - - spx_set_tree_addr(addr, idx_tree); - spx_set_type(addr, SPX_ADDR_TYPE_FORSTREE); - spx_set_keypair_addr(addr, idx_leaf); - - uint8_t pk_fors[SPX_N]; - spx_fors_pk_from_sig(pk_fors, sig_fors, fors_digest, pk_seed, addr); - - return spx_ht_verify(sig_ht, pk_fors, idx_tree, idx_leaf, pk_root, pk_seed); -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_address.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx_address.cc deleted file mode 100644 index 4e691572..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_address.cc +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#include "../internal.h" -#include "./spx_address.h" -#include "./spx_util.h" - - -// Offsets of various fields in the address structure for SPHINCS+-SHA2-128s. - -// The byte used to specify the Merkle tree layer. -#define SPX_OFFSET_LAYER 0 - -// The start of the 8 byte field used to specify the tree. -#define SPX_OFFSET_TREE 1 - -// The byte used to specify the hash type (reason). -#define SPX_OFFSET_TYPE 9 - -// The high byte used to specify the key pair (which one-time signature). -#define SPX_OFFSET_KP_ADDR2 12 - -// The low byte used to specific the key pair. -#define SPX_OFFSET_KP_ADDR1 13 - -// The byte used to specify the chain address (which Winternitz chain). -#define SPX_OFFSET_CHAIN_ADDR 17 - -// The byte used to specify the hash address (where in the Winternitz chain). -#define SPX_OFFSET_HASH_ADDR 21 - -// The byte used to specify the height of this node in the FORS or Merkle tree. -#define SPX_OFFSET_TREE_HGT 17 - -// The start of the 4 byte field used to specify the node in the FORS or Merkle -// tree. 
-#define SPX_OFFSET_TREE_INDEX 18 - - -void spx_set_chain_addr(uint8_t addr[32], uint32_t chain) { - addr[SPX_OFFSET_CHAIN_ADDR] = (uint8_t)chain; -} - -void spx_set_hash_addr(uint8_t addr[32], uint32_t hash) { - addr[SPX_OFFSET_HASH_ADDR] = (uint8_t)hash; -} - -void spx_set_keypair_addr(uint8_t addr[32], uint32_t keypair) { - addr[SPX_OFFSET_KP_ADDR2] = (uint8_t)(keypair >> 8); - addr[SPX_OFFSET_KP_ADDR1] = (uint8_t)keypair; -} - -void spx_copy_keypair_addr(uint8_t out[32], const uint8_t in[32]) { - memcpy(out, in, SPX_OFFSET_TREE + 8); - out[SPX_OFFSET_KP_ADDR2] = in[SPX_OFFSET_KP_ADDR2]; - out[SPX_OFFSET_KP_ADDR1] = in[SPX_OFFSET_KP_ADDR1]; -} - -void spx_set_layer_addr(uint8_t addr[32], uint32_t layer) { - addr[SPX_OFFSET_LAYER] = (uint8_t)layer; -} - -void spx_set_tree_addr(uint8_t addr[32], uint64_t tree) { - spx_uint64_to_len_bytes(&addr[SPX_OFFSET_TREE], 8, tree); -} - -void spx_set_type(uint8_t addr[32], uint32_t type) { - // NIST draft relies on this setting parts of the address to 0, so we do it - // here to avoid confusion. - // - // The behavior here is only correct for the SHA2 instantiations. 
- memset(addr + 10, 0, 12); - addr[SPX_OFFSET_TYPE] = (uint8_t)type; -} - -void spx_set_tree_height(uint8_t addr[32], uint32_t tree_height) { - addr[SPX_OFFSET_TREE_HGT] = (uint8_t)tree_height; -} - -void spx_set_tree_index(uint8_t addr[32], uint32_t tree_index) { - CRYPTO_store_u32_be(&addr[SPX_OFFSET_TREE_INDEX], tree_index); -} - -uint32_t spx_get_tree_index(uint8_t addr[32]) { - return CRYPTO_load_u32_be(addr + SPX_OFFSET_TREE_INDEX); -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_address.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_address.h deleted file mode 100644 index adda4325..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_address.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_ADDRESS_H -#define OPENSSL_HEADER_CRYPTO_SPX_ADDRESS_H - -#include - -#if defined(__cplusplus) -extern "C" { -#endif - - -#define SPX_ADDR_TYPE_WOTS 0 -#define SPX_ADDR_TYPE_WOTSPK 1 -#define SPX_ADDR_TYPE_HASHTREE 2 -#define SPX_ADDR_TYPE_FORSTREE 3 -#define SPX_ADDR_TYPE_FORSPK 4 -#define SPX_ADDR_TYPE_WOTSPRF 5 -#define SPX_ADDR_TYPE_FORSPRF 6 - -void spx_set_chain_addr(uint8_t addr[32], uint32_t chain); -void spx_set_hash_addr(uint8_t addr[32], uint32_t hash); -void spx_set_keypair_addr(uint8_t addr[32], uint32_t keypair); -void spx_set_layer_addr(uint8_t addr[32], uint32_t layer); -void spx_set_tree_addr(uint8_t addr[32], uint64_t tree); -void spx_set_type(uint8_t addr[32], uint32_t type); -void spx_set_tree_height(uint8_t addr[32], uint32_t tree_height); -void spx_set_tree_index(uint8_t addr[32], uint32_t tree_index); -void spx_copy_keypair_addr(uint8_t out[32], const uint8_t in[32]); - -uint32_t spx_get_tree_index(uint8_t addr[32]); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_ADDRESS_H diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_fors.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx_fors.cc deleted file mode 100644 index e270b2bd..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_fors.cc +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#include "./spx_address.h" -#include "./spx_fors.h" -#include "./spx_params.h" -#include "./spx_util.h" -#include "./spx_thash.h" - -void spx_fors_sk_gen(uint8_t *fors_sk, uint32_t idx, - const uint8_t sk_seed[SPX_N], const uint8_t pk_seed[SPX_N], - uint8_t addr[32]) { - uint8_t sk_addr[32]; - memcpy(sk_addr, addr, sizeof(sk_addr)); - - spx_set_type(sk_addr, SPX_ADDR_TYPE_FORSPRF); - spx_copy_keypair_addr(sk_addr, addr); - spx_set_tree_index(sk_addr, idx); - spx_thash_prf(fors_sk, pk_seed, sk_seed, sk_addr); -} - -void spx_fors_treehash(uint8_t root_node[SPX_N], const uint8_t sk_seed[SPX_N], - uint32_t i /*target node index*/, - uint32_t z /*target node height*/, - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - - BSSL_CHECK(z <= SPX_FORS_HEIGHT); - BSSL_CHECK(i < (uint32_t)(SPX_FORS_TREES * (1 << (SPX_FORS_HEIGHT - z)))); - - if (z == 0) { - uint8_t sk[SPX_N]; - spx_set_tree_height(addr, 0); - spx_set_tree_index(addr, i); - spx_fors_sk_gen(sk, i, sk_seed, pk_seed, addr); - spx_thash_f(root_node, sk, pk_seed, addr); - } else { - // Stores left node and right node. 
- uint8_t nodes[2 * SPX_N]; - spx_fors_treehash(nodes, sk_seed, 2 * i, z - 1, pk_seed, addr); - spx_fors_treehash(nodes + SPX_N, sk_seed, 2 * i + 1, z - 1, pk_seed, addr); - spx_set_tree_height(addr, z); - spx_set_tree_index(addr, i); - spx_thash_h(root_node, nodes, pk_seed, addr); - } -} - -void spx_fors_sign(uint8_t *fors_sig, const uint8_t message[SPX_FORS_MSG_BYTES], - const uint8_t sk_seed[SPX_N], const uint8_t pk_seed[SPX_N], - uint8_t addr[32]) { - uint32_t indices[SPX_FORS_TREES]; - - // Derive FORS indices compatible with the NIST changes. - spx_base_b(indices, SPX_FORS_TREES, message, /*log2_b=*/SPX_FORS_HEIGHT); - - for (size_t i = 0; i < SPX_FORS_TREES; ++i) { - spx_set_tree_height(addr, 0); - // Write the FORS secret key element to the correct position. - spx_fors_sk_gen(fors_sig + i * SPX_N * (SPX_FORS_HEIGHT + 1), - i * (1 << SPX_FORS_HEIGHT) + indices[i], sk_seed, pk_seed, - addr); - for (size_t j = 0; j < SPX_FORS_HEIGHT; ++j) { - size_t s = (indices[i] / (1 << j)) ^ 1; - // Write the FORS auth path element to the correct position. - spx_fors_treehash(fors_sig + SPX_N * (i * (SPX_FORS_HEIGHT + 1) + j + 1), - sk_seed, i * (1ULL << (SPX_FORS_HEIGHT - j)) + s, j, - pk_seed, addr); - } - } -} - -void spx_fors_pk_from_sig(uint8_t *fors_pk, - const uint8_t fors_sig[SPX_FORS_BYTES], - const uint8_t message[SPX_FORS_MSG_BYTES], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - uint32_t indices[SPX_FORS_TREES]; - uint8_t tmp[2 * SPX_N]; - uint8_t roots[SPX_FORS_TREES * SPX_N]; - - // Derive FORS indices compatible with the NIST changes. 
- spx_base_b(indices, SPX_FORS_TREES, message, /*log2_b=*/SPX_FORS_HEIGHT); - - for (size_t i = 0; i < SPX_FORS_TREES; ++i) { - // Pointer to current sk and authentication path - const uint8_t *sk = fors_sig + i * SPX_N * (SPX_FORS_HEIGHT + 1); - const uint8_t *auth = fors_sig + i * SPX_N * (SPX_FORS_HEIGHT + 1) + SPX_N; - uint8_t nodes[2 * SPX_N]; - - spx_set_tree_height(addr, 0); - spx_set_tree_index(addr, (i * (1 << SPX_FORS_HEIGHT)) + indices[i]); - - spx_thash_f(nodes, sk, pk_seed, addr); - - for (size_t j = 0; j < SPX_FORS_HEIGHT; ++j) { - spx_set_tree_height(addr, j + 1); - - // Even node - if (((indices[i] / (1 << j)) % 2) == 0) { - spx_set_tree_index(addr, spx_get_tree_index(addr) / 2); - memcpy(tmp, nodes, SPX_N); - memcpy(tmp + SPX_N, auth + j * SPX_N, SPX_N); - spx_thash_h(nodes + SPX_N, tmp, pk_seed, addr); - } else { - spx_set_tree_index(addr, (spx_get_tree_index(addr) - 1) / 2); - memcpy(tmp, auth + j * SPX_N, SPX_N); - memcpy(tmp + SPX_N, nodes, SPX_N); - spx_thash_h(nodes + SPX_N, tmp, pk_seed, addr); - } - memcpy(nodes, nodes + SPX_N, SPX_N); - } - memcpy(roots + i * SPX_N, nodes, SPX_N); - } - - uint8_t forspk_addr[32]; - memcpy(forspk_addr, addr, sizeof(forspk_addr)); - spx_set_type(forspk_addr, SPX_ADDR_TYPE_FORSPK); - spx_copy_keypair_addr(forspk_addr, addr); - spx_thash_tk(fors_pk, roots, pk_seed, forspk_addr); -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_fors.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_fors.h deleted file mode 100644 index cb6003e4..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_fors.h +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_FORS_H -#define OPENSSL_HEADER_CRYPTO_SPX_FORS_H - -#include - -#include "./spx_params.h" - -#if defined(__cplusplus) -extern "C" { -#endif - - -// Algorithm 13: Generate a FORS private key value. -void spx_fors_sk_gen(uint8_t *fors_sk, uint32_t idx, - const uint8_t sk_seed[SPX_N], const uint8_t pk_seed[SPX_N], - uint8_t addr[32]); - -// Algorithm 14: Compute the root of a Merkle subtree of FORS public values. -void spx_fors_treehash(uint8_t root_node[SPX_N], const uint8_t sk_seed[SPX_N], - uint32_t i /*target node index*/, - uint32_t z /*target node height*/, - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - -// Algorithm 15: Generate a FORS signature. -void spx_fors_sign(uint8_t *fors_sig, const uint8_t message[SPX_FORS_MSG_BYTES], - const uint8_t sk_seed[SPX_N], const uint8_t pk_seed[SPX_N], - uint8_t addr[32]); - -// Algorithm 16: Compute a FORS public key from a FORS signature. 
-void spx_fors_pk_from_sig(uint8_t *fors_pk, - const uint8_t fors_sig[SPX_FORS_BYTES], - const uint8_t message[SPX_FORS_MSG_BYTES], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_FORS_H diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.cc deleted file mode 100644 index 02d3e214..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.cc +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#include "./spx_address.h" -#include "./spx_merkle.h" -#include "./spx_params.h" -#include "./spx_thash.h" -#include "./spx_wots.h" - -void spx_treehash(uint8_t out_pk[SPX_N], const uint8_t sk_seed[SPX_N], - uint32_t i /*target node index*/, - uint32_t z /*target node height*/, - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - BSSL_CHECK(z <= SPX_TREE_HEIGHT); - BSSL_CHECK(i < (uint32_t)(1 << (SPX_TREE_HEIGHT - z))); - - if (z == 0) { - spx_set_type(addr, SPX_ADDR_TYPE_WOTS); - spx_set_keypair_addr(addr, i); - spx_wots_pk_gen(out_pk, sk_seed, pk_seed, addr); - } else { - // Stores left node and right node. 
- uint8_t nodes[2 * SPX_N]; - spx_treehash(nodes, sk_seed, 2 * i, z - 1, pk_seed, addr); - spx_treehash(nodes + SPX_N, sk_seed, 2 * i + 1, z - 1, pk_seed, addr); - spx_set_type(addr, SPX_ADDR_TYPE_HASHTREE); - spx_set_tree_height(addr, z); - spx_set_tree_index(addr, i); - spx_thash_h(out_pk, nodes, pk_seed, addr); - } -} - -void spx_xmss_sign(uint8_t *sig, const uint8_t msg[SPX_N], unsigned int idx, - const uint8_t sk_seed[SPX_N], const uint8_t pk_seed[SPX_N], - uint8_t addr[32]) { - // Build authentication path - for (size_t j = 0; j < SPX_TREE_HEIGHT; ++j) { - unsigned int k = (idx >> j) ^ 1; - spx_treehash(sig + SPX_WOTS_BYTES + j * SPX_N, sk_seed, k, j, pk_seed, - addr); - } - - // Compute WOTS+ signature - spx_set_type(addr, SPX_ADDR_TYPE_WOTS); - spx_set_keypair_addr(addr, idx); - spx_wots_sign(sig, msg, sk_seed, pk_seed, addr); -} - -void spx_xmss_pk_from_sig(uint8_t *root, const uint8_t *xmss_sig, - unsigned int idx, const uint8_t msg[SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - // Stores node[0] and node[1] from Algorithm 10 - uint8_t node[2 * SPX_N]; - uint8_t tmp[2 * SPX_N]; - spx_set_type(addr, SPX_ADDR_TYPE_WOTS); - spx_set_keypair_addr(addr, idx); - spx_wots_pk_from_sig(node, xmss_sig, msg, pk_seed, addr); - - const uint8_t *auth = xmss_sig + SPX_WOTS_BYTES; - - spx_set_type(addr, SPX_ADDR_TYPE_HASHTREE); - spx_set_tree_index(addr, idx); - for (size_t k = 0; k < SPX_TREE_HEIGHT; ++k) { - spx_set_tree_height(addr, k + 1); - // Is even - if (((idx >> k) & 1) == 0) { - spx_set_tree_index(addr, spx_get_tree_index(addr) >> 1); - memcpy(tmp, node, SPX_N); - memcpy(tmp + SPX_N, auth + k * SPX_N, SPX_N); - spx_thash_h(node + SPX_N, tmp, pk_seed, addr); - } else { - spx_set_tree_index(addr, (spx_get_tree_index(addr) - 1) >> 1); - memcpy(tmp, auth + k * SPX_N, SPX_N); - memcpy(tmp + SPX_N, node, SPX_N); - spx_thash_h(node + SPX_N, tmp, pk_seed, addr); - } - memcpy(node, node + SPX_N, SPX_N); - } - memcpy(root, node, SPX_N); -} - -void 
spx_ht_sign(uint8_t *sig, const uint8_t message[SPX_N], uint64_t idx_tree, - uint32_t idx_leaf, const uint8_t sk_seed[SPX_N], - const uint8_t pk_seed[SPX_N]) { - uint8_t addr[32] = {0}; - spx_set_tree_addr(addr, idx_tree); - - // Layer 0 - uint8_t sig_tmp[SPX_XMSS_BYTES]; - spx_xmss_sign(sig_tmp, message, idx_leaf, sk_seed, pk_seed, addr); - memcpy(sig, sig_tmp, sizeof(sig_tmp)); - - uint8_t root[SPX_N]; - spx_xmss_pk_from_sig(root, sig_tmp, idx_leaf, message, pk_seed, addr); - - // All other layers - for (size_t j = 1; j < SPX_D; ++j) { - idx_leaf = idx_tree % (1 << SPX_TREE_HEIGHT); - idx_tree = idx_tree >> SPX_TREE_HEIGHT; - spx_set_layer_addr(addr, j); - spx_set_tree_addr(addr, idx_tree); - spx_xmss_sign(sig_tmp, root, idx_leaf, sk_seed, pk_seed, addr); - memcpy(sig + j * SPX_XMSS_BYTES, sig_tmp, sizeof(sig_tmp)); - - if (j < (SPX_D - 1)) { - spx_xmss_pk_from_sig(root, sig_tmp, idx_leaf, root, pk_seed, addr); - } - } -} - -int spx_ht_verify(const uint8_t sig[SPX_D * SPX_XMSS_BYTES], - const uint8_t message[SPX_N], uint64_t idx_tree, - uint32_t idx_leaf, const uint8_t pk_root[SPX_N], - const uint8_t pk_seed[SPX_N]) { - uint8_t addr[32] = {0}; - spx_set_tree_addr(addr, idx_tree); - - uint8_t sig_tmp[SPX_XMSS_BYTES]; - memcpy(sig_tmp, sig, sizeof(sig_tmp)); - - uint8_t node[SPX_N]; - spx_xmss_pk_from_sig(node, sig_tmp, idx_leaf, message, pk_seed, addr); - - for (size_t j = 1; j < SPX_D; ++j) { - idx_leaf = idx_tree % (1 << SPX_TREE_HEIGHT); - idx_tree = idx_tree >> SPX_TREE_HEIGHT; - spx_set_layer_addr(addr, j); - spx_set_tree_addr(addr, idx_tree); - // Get jth XMSS signature - memcpy(sig_tmp, sig + j * SPX_XMSS_BYTES, sizeof(sig_tmp)); - - spx_xmss_pk_from_sig(node, sig_tmp, idx_leaf, node, pk_seed, addr); - } - return memcmp(node, pk_root, SPX_N) == 0; -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.h deleted file mode 100644 index d659ab0a..00000000 --- 
a/Sources/CCryptoBoringSSL/crypto/spx/spx_merkle.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_MERKLE_H -#define OPENSSL_HEADER_CRYPTO_SPX_MERKLE_H - -#include - -#include - -#include "./spx_params.h" - -#if defined(__cplusplus) -extern "C" { -#endif - - -// Algorithm 8: Compute the root of a Merkle subtree of WOTS+ public keys. -void spx_treehash(uint8_t out_pk[SPX_N], const uint8_t sk_seed[SPX_N], - uint32_t i /*target node index*/, - uint32_t z /*target node height*/, - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - -// Algorithm 9: Generate an XMSS signature. -void spx_xmss_sign(uint8_t *sig, const uint8_t msg[SPX_N], unsigned int idx, - const uint8_t sk_seed[SPX_N], const uint8_t pk_seed[SPX_N], - uint8_t addr[32]); - -// Algorithm 10: Compute an XMSS public key from an XMSS signature. -void spx_xmss_pk_from_sig(uint8_t *root, const uint8_t *xmss_sig, - unsigned int idx, const uint8_t msg[SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - -// Algorithm 11: Generate a hypertree signature. 
-void spx_ht_sign(uint8_t *sig, const uint8_t message[SPX_N], uint64_t idx_tree, - uint32_t idx_leaf, const uint8_t sk_seed[SPX_N], - const uint8_t pk_seed[SPX_N]); - -// Algorithm 12: Verify a hypertree signature. -int spx_ht_verify(const uint8_t sig[SPX_D * SPX_XMSS_BYTES], - const uint8_t message[SPX_N], uint64_t idx_tree, - uint32_t idx_leaf, const uint8_t pk_root[SPX_N], - const uint8_t pk_seed[SPX_N]); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_MERKLE_H diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_params.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_params.h deleted file mode 100644 index cc7fd102..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_params.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_PARAMS_H -#define OPENSSL_HEADER_CRYPTO_SPX_PARAMS_H - -#if defined(__cplusplus) -extern "C" { -#endif - - -// Output length of the hash function. -#define SPX_N 16 -// Total height of the tree structure. -#define SPX_FULL_HEIGHT 63 -// Number of subtree layers. -#define SPX_D 7 -// Height of the trees on each layer -#define SPX_TREE_HEIGHT 9 -// Height of each individual FORS tree. 
-#define SPX_FORS_HEIGHT 12 -// Total number of FORS tree used. -#define SPX_FORS_TREES 14 -// Size of a FORS signature -#define SPX_FORS_BYTES ((SPX_FORS_HEIGHT + 1) * SPX_FORS_TREES * SPX_N) - -// Winternitz parameter and derived values -#define SPX_WOTS_W 16 -#define SPX_WOTS_LOG_W 4 -#define SPX_WOTS_LEN1 32 -#define SPX_WOTS_LEN2 3 -#define SPX_WOTS_LEN 35 -#define SPX_WOTS_BYTES (SPX_N * SPX_WOTS_LEN) - -// XMSS sizes -#define SPX_XMSS_BYTES (SPX_WOTS_BYTES + (SPX_N * SPX_TREE_HEIGHT)) - -// Size of the message digest (NOTE: This is only correct for the SHA256 params -// here) -#define SPX_DIGEST_SIZE \ - (((SPX_FORS_TREES * SPX_FORS_HEIGHT) / 8) + \ - (((SPX_FULL_HEIGHT - SPX_TREE_HEIGHT) / 8) + 1) + (SPX_TREE_HEIGHT / 8) + \ - 1) - -// Compressed address size when using SHA256 -#define SPX_SHA256_ADDR_BYTES 22 - -// Size of the FORS message hash -#define SPX_FORS_MSG_BYTES ((SPX_FORS_HEIGHT * SPX_FORS_TREES + 7) / 8) -#define SPX_TREE_BITS (SPX_TREE_HEIGHT * (SPX_D - 1)) -#define SPX_TREE_BYTES ((SPX_TREE_BITS + 7) / 8) -#define SPX_LEAF_BITS SPX_TREE_HEIGHT -#define SPX_LEAF_BYTES ((SPX_LEAF_BITS + 7) / 8) - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_PARAMS_H diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_thash.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx_thash.cc deleted file mode 100644 index 0fb86470..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_thash.cc +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include -#include - -#include - -#include "./spx_params.h" -#include "./spx_util.h" -#include "./spx_thash.h" - -static void spx_thash(uint8_t *output, const uint8_t *input, - size_t input_blocks, const uint8_t pk_seed[SPX_N], - uint8_t addr[32]) { - uint8_t hash[32]; - SHA256_CTX sha256; - SHA256_Init(&sha256); - - // Process pubseed with padding to full block. - // TODO: This could be precomputed instead as it will be the same across all - // hash calls. - uint8_t padded_pk_seed[64] = {0}; - memcpy(padded_pk_seed, pk_seed, SPX_N); - - SHA256_Update(&sha256, padded_pk_seed, sizeof(padded_pk_seed)); - SHA256_Update(&sha256, addr, SPX_SHA256_ADDR_BYTES); - SHA256_Update(&sha256, input, input_blocks * SPX_N); - - SHA256_Final(hash, &sha256); - memcpy(output, hash, SPX_N); -} - -void spx_thash_f(uint8_t *output, const uint8_t input[SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - spx_thash(output, input, 1, pk_seed, addr); -} - -void spx_thash_h(uint8_t *output, const uint8_t input[2 * SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - spx_thash(output, input, 2, pk_seed, addr); -} - -void spx_thash_hmsg(uint8_t *output, const uint8_t r[SPX_N], - const uint8_t pk_seed[SPX_N], const uint8_t pk_root[SPX_N], - const uint8_t *msg, size_t msg_len) { - // MGF1-SHA-256(R || PK.seed || SHA-256(R || PK.seed || PK.root || M), m) - // input_buffer stores R || PK_SEED || SHA256(..) 
|| 4-byte index - uint8_t input_buffer[2 * SPX_N + 32 + 4] = {0}; - memcpy(input_buffer, r, SPX_N); - memcpy(input_buffer + SPX_N, pk_seed, SPX_N); - - // Inner hash - SHA256_CTX ctx; - SHA256_Init(&ctx); - SHA256_Update(&ctx, r, SPX_N); - SHA256_Update(&ctx, pk_seed, SPX_N); - SHA256_Update(&ctx, pk_root, SPX_N); - SHA256_Update(&ctx, msg, msg_len); - // Write directly into the input buffer - SHA256_Final(input_buffer + 2 * SPX_N, &ctx); - - // MGF1-SHA-256 - uint8_t output_buffer[3 * 32]; - // Need to call SHA256 3 times for message digest. - static_assert(SPX_DIGEST_SIZE <= sizeof(output_buffer), - "not enough room for hashes"); - SHA256(input_buffer, sizeof(input_buffer), output_buffer); - input_buffer[2 * SPX_N + 32 + 3] = 1; - SHA256(input_buffer, sizeof(input_buffer), output_buffer + 32); - input_buffer[2 * SPX_N + 32 + 3] = 2; - SHA256(input_buffer, sizeof(input_buffer), output_buffer + 64); - - memcpy(output, output_buffer, SPX_DIGEST_SIZE); -} - -void spx_thash_prf(uint8_t *output, const uint8_t pk_seed[SPX_N], - const uint8_t sk_seed[SPX_N], uint8_t addr[32]) { - spx_thash(output, sk_seed, 1, pk_seed, addr); -} - -void spx_thash_prfmsg(uint8_t *output, const uint8_t sk_prf[SPX_N], - const uint8_t opt_rand[SPX_N], const uint8_t *msg, - size_t msg_len) { - // Compute HMAC-SHA256(sk_prf, opt_rand || msg). We inline HMAC to avoid an - // allocation. 
- uint8_t hmac_key[SHA256_CBLOCK] = {0}; - static_assert(SPX_N <= SHA256_CBLOCK, "HMAC key is larger than block size"); - memcpy(hmac_key, sk_prf, SPX_N); - for (size_t i = 0; i < sizeof(hmac_key); i++) { - hmac_key[i] ^= 0x36; - } - - uint8_t hash[SHA256_DIGEST_LENGTH]; - SHA256_CTX ctx; - SHA256_Init(&ctx); - SHA256_Update(&ctx, hmac_key, sizeof(hmac_key)); - SHA256_Update(&ctx, opt_rand, SPX_N); - SHA256_Update(&ctx, msg, msg_len); - SHA256_Final(hash, &ctx); - - for (size_t i = 0; i < sizeof(hmac_key); i++) { - hmac_key[i] ^= 0x36 ^ 0x5c; - } - SHA256_Init(&ctx); - SHA256_Update(&ctx, hmac_key, sizeof(hmac_key)); - SHA256_Update(&ctx, hash, sizeof(hash)); - SHA256_Final(hash, &ctx); - - // Truncate to SPX_N bytes - memcpy(output, hash, SPX_N); -} - -void spx_thash_tl(uint8_t *output, const uint8_t input[SPX_WOTS_BYTES], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - spx_thash(output, input, SPX_WOTS_LEN, pk_seed, addr); -} - -void spx_thash_tk(uint8_t *output, const uint8_t input[SPX_FORS_TREES * SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]) { - spx_thash(output, input, SPX_FORS_TREES, pk_seed, addr); -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_thash.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_thash.h deleted file mode 100644 index 5ba46c3f..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_thash.h +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_THASH_H -#define OPENSSL_HEADER_CRYPTO_SPX_THASH_H - -#include - -#include "./spx_params.h" - -#if defined(__cplusplus) -extern "C" { -#endif - - -// Implements F: a hash function takes an n-byte message as input and produces -// an n-byte output. -void spx_thash_f(uint8_t *output, const uint8_t input[SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - -// Implements H: a hash function takes a 2*n-byte message as input and produces -// an n-byte output. -void spx_thash_h(uint8_t *output, const uint8_t input[2 * SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - -// Implements Hmsg: a hash function used to generate the digest of the message -// to be signed. -void spx_thash_hmsg(uint8_t *output, const uint8_t r[SPX_N], - const uint8_t pk_seed[SPX_N], const uint8_t pk_root[SPX_N], - const uint8_t *msg, size_t msg_len); - -// Implements PRF: a pseudo-random function that is used to generate the secret -// values in WOTS+ and FORS private keys. -void spx_thash_prf(uint8_t *output, const uint8_t pk_seed[SPX_N], - const uint8_t sk_seed[SPX_N], uint8_t addr[32]); - -// Implements PRF: a pseudo-random function that is used to generate the -// randomizer r for the randomized hashing of the message to be signed. values -// in WOTS+ and FORS private keys. -void spx_thash_prfmsg(uint8_t *output, const uint8_t sk_prf[SPX_N], - const uint8_t opt_rand[SPX_N], const uint8_t *msg, - size_t msg_len); - -// Implements Tl: a hash function that maps an l*n-byte message to an n-byte -// message. 
-void spx_thash_tl(uint8_t *output, const uint8_t input[SPX_WOTS_BYTES], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - -// Implements Tk: a hash function that maps a k*n-byte message to an n-byte -// message. -void spx_thash_tk(uint8_t *output, const uint8_t input[SPX_FORS_TREES * SPX_N], - const uint8_t pk_seed[SPX_N], uint8_t addr[32]); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_THASH_H diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_util.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx_util.cc deleted file mode 100644 index 20c0399f..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_util.cc +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -#include - -#include - -#include "./spx_util.h" - -void spx_uint64_to_len_bytes(uint8_t *output, size_t out_len, uint64_t input) { - for (size_t i = out_len; i > 0; --i) { - output[i - 1] = input & 0xff; - input = input >> 8; - } -} - -uint64_t spx_to_uint64(const uint8_t *input, size_t input_len) { - uint64_t tmp = 0; - for (size_t i = 0; i < input_len; ++i) { - tmp = 256 * tmp + input[i]; - } - return tmp; -} - -void spx_base_b(uint32_t *output, size_t out_len, const uint8_t *input, - unsigned int log2_b) { - int in = 0; - uint32_t out = 0; - uint32_t bits = 0; - uint32_t total = 0; - uint32_t base = UINT32_C(1) << log2_b; - - for (out = 0; out < out_len; ++out) { - while (bits < log2_b) { - total = (total << 8) + input[in]; - in++; - bits = bits + 8; - } - bits -= log2_b; - output[out] = (total >> bits) % base; - } -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_util.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_util.h deleted file mode 100644 index 26bc9c23..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_util.h +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_UTIL_H -#define OPENSSL_HEADER_CRYPTO_SPX_UTIL_H - -#include - -#if defined(__cplusplus) -extern "C" { -#endif - - -// Encodes the integer value of input to out_len bytes in big-endian order. -// Note that input < 2^(8*out_len), as otherwise this function will truncate -// the least significant bytes of the integer representation. -void spx_uint64_to_len_bytes(uint8_t *output, size_t out_len, uint64_t input); - -uint64_t spx_to_uint64(const uint8_t *input, size_t input_len); - -// Compute the base 2^log2_b representation of X. -// -// As some of the parameter sets in https://eprint.iacr.org/2022/1725.pdf use -// a FORS height > 16 we use a uint32_t to store the output. -void spx_base_b(uint32_t *output, size_t out_len, const uint8_t *input, - unsigned int log2_b); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_UTIL_H diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_wots.cc b/Sources/CCryptoBoringSSL/crypto/spx/spx_wots.cc deleted file mode 100644 index 7b840046..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_wots.cc +++ /dev/null @@ -1,135 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -#include - -#include -#include -#include - -#include "./spx_address.h" -#include "./spx_params.h" -#include "./spx_util.h" -#include "./spx_thash.h" -#include "./spx_wots.h" - -// Chaining function used in WOTS+. -static void chain(uint8_t *output, const uint8_t *input, uint32_t start, - uint32_t steps, const uint8_t *pub_seed, uint8_t addr[32]) { - memcpy(output, input, SPX_N); - - for (size_t i = start; i < (start + steps) && i < SPX_WOTS_W; ++i) { - spx_set_hash_addr(addr, i); - spx_thash_f(output, output, pub_seed, addr); - } -} - -void spx_wots_pk_from_sig(uint8_t *pk, const uint8_t *sig, const uint8_t *msg, - const uint8_t pub_seed[SPX_N], uint8_t addr[32]) { - uint8_t tmp[SPX_WOTS_BYTES]; - uint8_t wots_pk_addr[32]; - memcpy(wots_pk_addr, addr, sizeof(wots_pk_addr)); - - // Convert message to base w - uint32_t base_w_msg[SPX_WOTS_LEN]; - spx_base_b(base_w_msg, SPX_WOTS_LEN1, msg, /*log2_b=*/SPX_WOTS_LOG_W); - - // Compute checksum - uint64_t csum = 0; - for (size_t i = 0; i < SPX_WOTS_LEN1; ++i) { - csum += SPX_WOTS_W - 1 - base_w_msg[i]; - } - - // Convert csum to base w as in Algorithm 7, Line 9 - uint8_t csum_bytes[(SPX_WOTS_LEN2 * SPX_WOTS_LOG_W + 7) / 8]; - csum = csum << ((8 - ((SPX_WOTS_LEN2 * SPX_WOTS_LOG_W)) % 8) % 8); - spx_uint64_to_len_bytes(csum_bytes, sizeof(csum_bytes), csum); - - // Write the base w representation of csum to the end of the message. 
- spx_base_b(base_w_msg + SPX_WOTS_LEN1, SPX_WOTS_LEN2, csum_bytes, - /*log2_b=*/SPX_WOTS_LOG_W); - - // Compute chains - for (size_t i = 0; i < SPX_WOTS_LEN; ++i) { - spx_set_chain_addr(addr, i); - chain(tmp + i * SPX_N, sig + i * SPX_N, base_w_msg[i], - SPX_WOTS_W - 1 - base_w_msg[i], pub_seed, addr); - } - - // Compress pk - spx_set_type(wots_pk_addr, SPX_ADDR_TYPE_WOTSPK); - spx_copy_keypair_addr(wots_pk_addr, addr); - spx_thash_tl(pk, tmp, pub_seed, wots_pk_addr); -} - -void spx_wots_pk_gen(uint8_t *pk, const uint8_t sk_seed[SPX_N], - const uint8_t pub_seed[SPX_N], uint8_t addr[32]) { - uint8_t tmp[SPX_WOTS_BYTES]; - uint8_t tmp_sk[SPX_N]; - uint8_t wots_pk_addr[32], sk_addr[32]; - memcpy(wots_pk_addr, addr, sizeof(wots_pk_addr)); - memcpy(sk_addr, addr, sizeof(sk_addr)); - - spx_set_type(sk_addr, SPX_ADDR_TYPE_WOTSPRF); - spx_copy_keypair_addr(sk_addr, addr); - - for (size_t i = 0; i < SPX_WOTS_LEN; ++i) { - spx_set_chain_addr(sk_addr, i); - spx_thash_prf(tmp_sk, pub_seed, sk_seed, sk_addr); - spx_set_chain_addr(addr, i); - chain(tmp + i * SPX_N, tmp_sk, 0, SPX_WOTS_W - 1, pub_seed, addr); - } - - // Compress pk - spx_set_type(wots_pk_addr, SPX_ADDR_TYPE_WOTSPK); - spx_copy_keypair_addr(wots_pk_addr, addr); - spx_thash_tl(pk, tmp, pub_seed, wots_pk_addr); -} - -void spx_wots_sign(uint8_t *sig, const uint8_t msg[SPX_N], - const uint8_t sk_seed[SPX_N], const uint8_t pub_seed[SPX_N], - uint8_t addr[32]) { - // Convert message to base w - uint32_t base_w_msg[SPX_WOTS_LEN]; - spx_base_b(base_w_msg, SPX_WOTS_LEN1, msg, /*log2_b=*/SPX_WOTS_LOG_W); - - // Compute checksum - uint64_t csum = 0; - for (size_t i = 0; i < SPX_WOTS_LEN1; ++i) { - csum += SPX_WOTS_W - 1 - base_w_msg[i]; - } - - // Convert csum to base w as in Algorithm 6, Line 9 - uint8_t csum_bytes[(SPX_WOTS_LEN2 * SPX_WOTS_LOG_W + 7) / 8]; - csum = csum << ((8 - ((SPX_WOTS_LEN2 * SPX_WOTS_LOG_W)) % 8) % 8); - spx_uint64_to_len_bytes(csum_bytes, sizeof(csum_bytes), csum); - - // Write the base w 
representation of csum to the end of the message. - spx_base_b(base_w_msg + SPX_WOTS_LEN1, SPX_WOTS_LEN2, csum_bytes, - /*log2_b=*/SPX_WOTS_LOG_W); - - // Compute chains - uint8_t tmp_sk[SPX_N]; - uint8_t sk_addr[32]; - memcpy(sk_addr, addr, sizeof(sk_addr)); - spx_set_type(sk_addr, SPX_ADDR_TYPE_WOTSPRF); - spx_copy_keypair_addr(sk_addr, addr); - - for (size_t i = 0; i < SPX_WOTS_LEN; ++i) { - spx_set_chain_addr(sk_addr, i); - spx_thash_prf(tmp_sk, pub_seed, sk_seed, sk_addr); - spx_set_chain_addr(addr, i); - chain(sig + i * SPX_N, tmp_sk, 0, base_w_msg[i], pub_seed, addr); - } -} diff --git a/Sources/CCryptoBoringSSL/crypto/spx/spx_wots.h b/Sources/CCryptoBoringSSL/crypto/spx/spx_wots.h deleted file mode 100644 index e9087f3f..00000000 --- a/Sources/CCryptoBoringSSL/crypto/spx/spx_wots.h +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CRYPTO_SPX_WOTS_H -#define OPENSSL_HEADER_CRYPTO_SPX_WOTS_H - -#include - -#include "./spx_params.h" - -#if defined(__cplusplus) -extern "C" { -#endif - - -// Algorithm 5: Generate a WOTS+ public key. 
-void spx_wots_pk_gen(uint8_t *pk, const uint8_t sk_seed[SPX_N], - const uint8_t pub_seed[SPX_N], uint8_t addr[32]); - -// Algorithm 6: Generate a WOTS+ signature on an n-byte message. -void spx_wots_sign(uint8_t *sig, const uint8_t msg[SPX_N], - const uint8_t sk_seed[SPX_N], const uint8_t pub_seed[SPX_N], - uint8_t addr[32]); - -// Algorithm 7: Compute a WOTS+ public key from a message and its signature. -void spx_wots_pk_from_sig(uint8_t *pk, const uint8_t *sig, const uint8_t *msg, - const uint8_t pub_seed[SPX_N], uint8_t addr[32]); - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_CRYPTO_SPX_WOTS_H diff --git a/Sources/CCryptoBoringSSL/crypto/thread_none.cc b/Sources/CCryptoBoringSSL/crypto/thread_none.cc index e6f7d427..c43928b2 100644 --- a/Sources/CCryptoBoringSSL/crypto/thread_none.cc +++ b/Sources/CCryptoBoringSSL/crypto/thread_none.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/thread_pthread.cc b/Sources/CCryptoBoringSSL/crypto/thread_pthread.cc index e4c88838..2dcb409b 100644 --- a/Sources/CCryptoBoringSSL/crypto/thread_pthread.cc +++ b/Sources/CCryptoBoringSSL/crypto/thread_pthread.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/thread_win.cc b/Sources/CCryptoBoringSSL/crypto/thread_win.cc index c0d794f7..855b1b00 100644 --- a/Sources/CCryptoBoringSSL/crypto/thread_win.cc +++ b/Sources/CCryptoBoringSSL/crypto/thread_win.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. 
+/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/trust_token/internal.h b/Sources/CCryptoBoringSSL/crypto/trust_token/internal.h index afdee0a2..dffad0b9 100644 --- a/Sources/CCryptoBoringSSL/crypto/trust_token/internal.h +++ b/Sources/CCryptoBoringSSL/crypto/trust_token/internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/trust_token/pmbtoken.cc b/Sources/CCryptoBoringSSL/crypto/trust_token/pmbtoken.cc index 8f5c5349..8bceacf3 100644 --- a/Sources/CCryptoBoringSSL/crypto/trust_token/pmbtoken.cc +++ b/Sources/CCryptoBoringSSL/crypto/trust_token/pmbtoken.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/trust_token/trust_token.cc b/Sources/CCryptoBoringSSL/crypto/trust_token/trust_token.cc index bc13c604..8b986e76 100644 --- a/Sources/CCryptoBoringSSL/crypto/trust_token/trust_token.cc +++ b/Sources/CCryptoBoringSSL/crypto/trust_token/trust_token.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. 
+/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/trust_token/voprf.cc b/Sources/CCryptoBoringSSL/crypto/trust_token/voprf.cc index 25e2e45e..4d844be5 100644 --- a/Sources/CCryptoBoringSSL/crypto/trust_token/voprf.cc +++ b/Sources/CCryptoBoringSSL/crypto/trust_token/voprf.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. +/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/x509/algorithm.cc b/Sources/CCryptoBoringSSL/crypto/x509/algorithm.cc index 6479e8a8..51a2a84e 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/algorithm.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/algorithm.cc @@ -98,7 +98,7 @@ int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor) { // Default behavior: look up the OID for the algorithm/hash pair and encode // that. - const EVP_MD *digest = EVP_MD_CTX_md(ctx); + const EVP_MD *digest = EVP_MD_CTX_get0_md(ctx); if (digest == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_CONTEXT_NOT_INITIALISED); return 0; diff --git a/Sources/CCryptoBoringSSL/crypto/x509/policy.cc b/Sources/CCryptoBoringSSL/crypto/x509/policy.cc index 16f6d00b..af9e21d8 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/policy.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/policy.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. 
+/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/crypto/x509/rsa_pss.cc b/Sources/CCryptoBoringSSL/crypto/x509/rsa_pss.cc index fc40fdf2..73e0dd02 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/rsa_pss.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/rsa_pss.cc @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2006. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/x509/v3_alt.cc b/Sources/CCryptoBoringSSL/crypto/x509/v3_alt.cc index 6bb43dc2..4d9e59e4 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/v3_alt.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/v3_alt.cc @@ -1,6 +1,5 @@ /* - * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project. + * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project. */ /* ==================================================================== * Copyright (c) 1999-2003 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/x509/v3_ncons.cc b/Sources/CCryptoBoringSSL/crypto/x509/v3_ncons.cc index 46df627a..4e8f8161 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/v3_ncons.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/v3_ncons.cc @@ -1,6 +1,5 @@ /* - * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project. + * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project. */ /* ==================================================================== * Copyright (c) 2003 The OpenSSL Project. All rights reserved. 
diff --git a/Sources/CCryptoBoringSSL/crypto/x509/v3_pcons.cc b/Sources/CCryptoBoringSSL/crypto/x509/v3_pcons.cc index 74f1ab82..4168f363 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/v3_pcons.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/v3_pcons.cc @@ -1,6 +1,5 @@ /* - * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project. + * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project. */ /* ==================================================================== * Copyright (c) 2003 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/x509/v3_pmaps.cc b/Sources/CCryptoBoringSSL/crypto/x509/v3_pmaps.cc index 624e07a8..0c1d5627 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/v3_pmaps.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/v3_pmaps.cc @@ -1,6 +1,5 @@ /* - * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project. + * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project. */ /* ==================================================================== * Copyright (c) 2003 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/crypto/x509/v3_utl.cc b/Sources/CCryptoBoringSSL/crypto/x509/v3_utl.cc index b43b3a15..ac51903f 100644 --- a/Sources/CCryptoBoringSSL/crypto/x509/v3_utl.cc +++ b/Sources/CCryptoBoringSSL/crypto/x509/v3_utl.cc @@ -1,6 +1,5 @@ /* - * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project. + * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project. */ /* ==================================================================== * Copyright (c) 1999-2003 The OpenSSL Project. All rights reserved. 
diff --git a/Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-apple.S b/Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-apple.S new file mode 100644 index 00000000..dd21aab8 --- /dev/null +++ b/Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-apple.S @@ -0,0 +1,2269 @@ +#define BORINGSSL_PREFIX CCryptoBoringSSL +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. + +#include + +#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) +.section __DATA,__const +.p2align 6 + + +L$bswap_mask: +.quad 0x08090a0b0c0d0e0f, 0x0001020304050607 + + + + + + + + +L$gfpoly: +.quad 1, 0xc200000000000000 + + +L$gfpoly_and_internal_carrybit: +.quad 1, 0xc200000000000001 + + + + + +L$ctr_pattern: +.quad 0, 0 +.quad 1, 0 +L$inc_2blocks: +.quad 2, 0 +.quad 3, 0 +L$inc_4blocks: +.quad 4, 0 + +.text +.globl _gcm_gmult_vpclmulqdq_avx10 +.private_extern _gcm_gmult_vpclmulqdq_avx10 + +.p2align 5 +_gcm_gmult_vpclmulqdq_avx10: + + +_CET_ENDBR + + + + vmovdqu (%rdi),%xmm0 + vmovdqu L$bswap_mask(%rip),%xmm1 + vmovdqu 256-16(%rsi),%xmm2 + vmovdqu L$gfpoly(%rip),%xmm3 + vpshufb %xmm1,%xmm0,%xmm0 + + vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4 + vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5 + vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6 + vpxord %xmm6,%xmm5,%xmm5 + vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6 + vpshufd $0x4e,%xmm4,%xmm4 + vpternlogd $0x96,%xmm6,%xmm4,%xmm5 + vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0 + vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4 + vpshufd $0x4e,%xmm5,%xmm5 + vpternlogd $0x96,%xmm4,%xmm5,%xmm0 + + + vpshufb %xmm1,%xmm0,%xmm0 + vmovdqu %xmm0,(%rdi) + ret + + + +.globl _gcm_init_vpclmulqdq_avx10 +.private_extern _gcm_init_vpclmulqdq_avx10 + +.p2align 5 +_gcm_init_vpclmulqdq_avx10: + + +_CET_ENDBR + + leaq 256-32(%rdi),%r8 + + + + vpshufd $0x4e,(%rsi),%xmm3 + + + + + + + + + + + + + + + + + vpshufd $0xd3,%xmm3,%xmm0 + vpsrad $31,%xmm0,%xmm0 + vpaddq %xmm3,%xmm3,%xmm3 + + vpternlogd 
$0x78,L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm3 + + + vbroadcasti32x4 L$gfpoly(%rip),%ymm5 + + + + + + + + + vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 + vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 + vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 + vpxord %xmm2,%xmm1,%xmm1 + vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 + vpshufd $0x4e,%xmm0,%xmm0 + vpternlogd $0x96,%xmm2,%xmm0,%xmm1 + vpclmulqdq $0x11,%xmm3,%xmm3,%xmm4 + vpclmulqdq $0x01,%xmm1,%xmm5,%xmm0 + vpshufd $0x4e,%xmm1,%xmm1 + vpternlogd $0x96,%xmm0,%xmm1,%xmm4 + + + + vinserti128 $1,%xmm3,%ymm4,%ymm3 + vinserti128 $1,%xmm4,%ymm4,%ymm4 + + vmovdqu8 %ymm3,(%r8) + + + + + + movl $7,%eax +L$precompute_next__func1: + subq $32,%r8 + vpclmulqdq $0x00,%ymm4,%ymm3,%ymm0 + vpclmulqdq $0x01,%ymm4,%ymm3,%ymm1 + vpclmulqdq $0x10,%ymm4,%ymm3,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2 + vpshufd $0x4e,%ymm0,%ymm0 + vpternlogd $0x96,%ymm2,%ymm0,%ymm1 + vpclmulqdq $0x11,%ymm4,%ymm3,%ymm3 + vpclmulqdq $0x01,%ymm1,%ymm5,%ymm0 + vpshufd $0x4e,%ymm1,%ymm1 + vpternlogd $0x96,%ymm0,%ymm1,%ymm3 + + vmovdqu8 %ymm3,(%r8) + decl %eax + jnz L$precompute_next__func1 + + vzeroupper + ret + + + +.globl _gcm_ghash_vpclmulqdq_avx10_256 +.private_extern _gcm_ghash_vpclmulqdq_avx10_256 + +.p2align 5 +_gcm_ghash_vpclmulqdq_avx10_256: + + +_CET_ENDBR + + + + + + + vmovdqu L$bswap_mask(%rip),%xmm4 + vmovdqu L$gfpoly(%rip),%xmm10 + + + vmovdqu (%rdi),%xmm5 + vpshufb %xmm4,%xmm5,%xmm5 + + + cmpq $32,%rcx + jb L$aad_blockbyblock__func1 + + + + vshufi64x2 $0,%ymm4,%ymm4,%ymm4 + vshufi64x2 $0,%ymm10,%ymm10,%ymm10 + + + vmovdqu8 256-32(%rsi),%ymm9 + + cmpq $128-1,%rcx + jbe L$aad_loop_1x__func1 + + + vmovdqu8 256-128(%rsi),%ymm6 + vmovdqu8 256-96(%rsi),%ymm7 + vmovdqu8 256-64(%rsi),%ymm8 + + +L$aad_loop_4x__func1: + vmovdqu8 0(%rdx),%ymm0 + vmovdqu8 32(%rdx),%ymm1 + vmovdqu8 64(%rdx),%ymm2 + vmovdqu8 96(%rdx),%ymm3 + vpshufb %ymm4,%ymm0,%ymm0 + vpxord %ymm5,%ymm0,%ymm0 + vpshufb %ymm4,%ymm1,%ymm1 + vpshufb %ymm4,%ymm2,%ymm2 + vpshufb %ymm4,%ymm3,%ymm3 + 
vpclmulqdq $0x00,%ymm6,%ymm0,%ymm5 + vpclmulqdq $0x00,%ymm7,%ymm1,%ymm11 + vpclmulqdq $0x00,%ymm8,%ymm2,%ymm12 + vpxord %ymm11,%ymm5,%ymm5 + vpclmulqdq $0x00,%ymm9,%ymm3,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm5 + vpclmulqdq $0x01,%ymm6,%ymm0,%ymm11 + vpclmulqdq $0x01,%ymm7,%ymm1,%ymm12 + vpclmulqdq $0x01,%ymm8,%ymm2,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm11 + vpclmulqdq $0x01,%ymm9,%ymm3,%ymm12 + vpclmulqdq $0x10,%ymm6,%ymm0,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm11 + vpclmulqdq $0x10,%ymm7,%ymm1,%ymm12 + vpclmulqdq $0x10,%ymm8,%ymm2,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm11 + vpclmulqdq $0x01,%ymm5,%ymm10,%ymm13 + vpclmulqdq $0x10,%ymm9,%ymm3,%ymm12 + vpxord %ymm12,%ymm11,%ymm11 + vpshufd $0x4e,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm6,%ymm0,%ymm0 + vpclmulqdq $0x11,%ymm7,%ymm1,%ymm1 + vpclmulqdq $0x11,%ymm8,%ymm2,%ymm2 + vpternlogd $0x96,%ymm13,%ymm5,%ymm11 + vpclmulqdq $0x11,%ymm9,%ymm3,%ymm3 + vpternlogd $0x96,%ymm2,%ymm1,%ymm0 + vpclmulqdq $0x01,%ymm11,%ymm10,%ymm12 + vpxord %ymm3,%ymm0,%ymm5 + vpshufd $0x4e,%ymm11,%ymm11 + vpternlogd $0x96,%ymm12,%ymm11,%ymm5 + vextracti32x4 $1,%ymm5,%xmm0 + vpxord %xmm0,%xmm5,%xmm5 + + subq $-128,%rdx + addq $-128,%rcx + cmpq $128-1,%rcx + ja L$aad_loop_4x__func1 + + + cmpq $32,%rcx + jb L$aad_large_done__func1 +L$aad_loop_1x__func1: + vmovdqu8 (%rdx),%ymm0 + vpshufb %ymm4,%ymm0,%ymm0 + vpxord %ymm0,%ymm5,%ymm5 + vpclmulqdq $0x00,%ymm9,%ymm5,%ymm0 + vpclmulqdq $0x01,%ymm9,%ymm5,%ymm1 + vpclmulqdq $0x10,%ymm9,%ymm5,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vpclmulqdq $0x01,%ymm0,%ymm10,%ymm2 + vpshufd $0x4e,%ymm0,%ymm0 + vpternlogd $0x96,%ymm2,%ymm0,%ymm1 + vpclmulqdq $0x11,%ymm9,%ymm5,%ymm5 + vpclmulqdq $0x01,%ymm1,%ymm10,%ymm0 + vpshufd $0x4e,%ymm1,%ymm1 + vpternlogd $0x96,%ymm0,%ymm1,%ymm5 + + vextracti32x4 $1,%ymm5,%xmm0 + vpxord %xmm0,%xmm5,%xmm5 + + addq $32,%rdx + subq $32,%rcx + cmpq $32,%rcx + jae L$aad_loop_1x__func1 + +L$aad_large_done__func1: + + + vzeroupper + + +L$aad_blockbyblock__func1: + testq 
%rcx,%rcx + jz L$aad_done__func1 + vmovdqu 256-16(%rsi),%xmm9 +L$aad_loop_blockbyblock__func1: + vmovdqu (%rdx),%xmm0 + vpshufb %xmm4,%xmm0,%xmm0 + vpxor %xmm0,%xmm5,%xmm5 + vpclmulqdq $0x00,%xmm9,%xmm5,%xmm0 + vpclmulqdq $0x01,%xmm9,%xmm5,%xmm1 + vpclmulqdq $0x10,%xmm9,%xmm5,%xmm2 + vpxord %xmm2,%xmm1,%xmm1 + vpclmulqdq $0x01,%xmm0,%xmm10,%xmm2 + vpshufd $0x4e,%xmm0,%xmm0 + vpternlogd $0x96,%xmm2,%xmm0,%xmm1 + vpclmulqdq $0x11,%xmm9,%xmm5,%xmm5 + vpclmulqdq $0x01,%xmm1,%xmm10,%xmm0 + vpshufd $0x4e,%xmm1,%xmm1 + vpternlogd $0x96,%xmm0,%xmm1,%xmm5 + + addq $16,%rdx + subq $16,%rcx + jnz L$aad_loop_blockbyblock__func1 + +L$aad_done__func1: + + vpshufb %xmm4,%xmm5,%xmm5 + vmovdqu %xmm5,(%rdi) + ret + + + +.globl _aes_gcm_enc_update_vaes_avx10_256 +.private_extern _aes_gcm_enc_update_vaes_avx10_256 + +.p2align 5 +_aes_gcm_enc_update_vaes_avx10_256: + + +_CET_ENDBR + pushq %r12 + + + movq 16(%rsp),%r12 +#ifdef BORINGSSL_DISPATCH_TEST + + movb $1,_BORINGSSL_function_hit+6(%rip) +#endif + + vbroadcasti32x4 L$bswap_mask(%rip),%ymm8 + vbroadcasti32x4 L$gfpoly(%rip),%ymm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%ymm12 + vpshufb %ymm8,%ymm12,%ymm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%ymm13 + vbroadcasti32x4 (%r11),%ymm14 + + + vpaddd L$ctr_pattern(%rip),%ymm12,%ymm12 + + + vbroadcasti32x4 L$inc_2blocks(%rip),%ymm11 + + + + cmpq $128-1,%rdx + jbe L$crypt_loop_4x_done__func1 + + + vmovdqu8 256-128(%r9),%ymm27 + vmovdqu8 256-96(%r9),%ymm28 + vmovdqu8 256-64(%r9),%ymm29 + vmovdqu8 256-32(%r9),%ymm30 + + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm1 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm2 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm3 + vpaddd %ymm11,%ymm12,%ymm12 + + + vpxord %ymm13,%ymm0,%ymm0 + vpxord %ymm13,%ymm1,%ymm1 + vpxord %ymm13,%ymm2,%ymm2 + vpxord %ymm13,%ymm3,%ymm3 + + 
leaq 16(%rcx),%rax +L$vaesenc_loop_first_4_vecs__func1: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_first_4_vecs__func1 + + + + vpxord 0(%rdi),%ymm14,%ymm4 + vpxord 32(%rdi),%ymm14,%ymm5 + vpxord 64(%rdi),%ymm14,%ymm6 + vpxord 96(%rdi),%ymm14,%ymm7 + + + + vaesenclast %ymm4,%ymm0,%ymm4 + vaesenclast %ymm5,%ymm1,%ymm5 + vaesenclast %ymm6,%ymm2,%ymm6 + vaesenclast %ymm7,%ymm3,%ymm7 + + + vmovdqu8 %ymm4,0(%rsi) + vmovdqu8 %ymm5,32(%rsi) + vmovdqu8 %ymm6,64(%rsi) + vmovdqu8 %ymm7,96(%rsi) + + subq $-128,%rdi + subq $-128,%rsi + addq $-128,%rdx + cmpq $128-1,%rdx + jbe L$ghash_last_ciphertext_4x__func1 + vbroadcasti32x4 -144(%r11),%ymm15 + vbroadcasti32x4 -128(%r11),%ymm16 + vbroadcasti32x4 -112(%r11),%ymm17 + vbroadcasti32x4 -96(%r11),%ymm18 + vbroadcasti32x4 -80(%r11),%ymm19 + vbroadcasti32x4 -64(%r11),%ymm20 + vbroadcasti32x4 -48(%r11),%ymm21 + vbroadcasti32x4 -32(%r11),%ymm22 + vbroadcasti32x4 -16(%r11),%ymm23 +L$crypt_loop_4x__func1: + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm1 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm2 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm3 + vpaddd %ymm11,%ymm12,%ymm12 + + + vpxord %ymm13,%ymm0,%ymm0 + vpxord %ymm13,%ymm1,%ymm1 + vpxord %ymm13,%ymm2,%ymm2 + vpxord %ymm13,%ymm3,%ymm3 + + cmpl $24,%r10d + jl L$aes128__func1 + je L$aes192__func1 + + vbroadcasti32x4 -208(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -192(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +L$aes192__func1: + vbroadcasti32x4 -176(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + 
vbroadcasti32x4 -160(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +L$aes128__func1: + vpshufb %ymm8,%ymm4,%ymm4 + vpxord %ymm10,%ymm4,%ymm4 + vpshufb %ymm8,%ymm5,%ymm5 + vpshufb %ymm8,%ymm6,%ymm6 + + vaesenc %ymm15,%ymm0,%ymm0 + vaesenc %ymm15,%ymm1,%ymm1 + vaesenc %ymm15,%ymm2,%ymm2 + vaesenc %ymm15,%ymm3,%ymm3 + + vpshufb %ymm8,%ymm7,%ymm7 + vpclmulqdq $0x00,%ymm27,%ymm4,%ymm10 + vpclmulqdq $0x00,%ymm28,%ymm5,%ymm24 + vpclmulqdq $0x00,%ymm29,%ymm6,%ymm25 + + vaesenc %ymm16,%ymm0,%ymm0 + vaesenc %ymm16,%ymm1,%ymm1 + vaesenc %ymm16,%ymm2,%ymm2 + vaesenc %ymm16,%ymm3,%ymm3 + + vpxord %ymm24,%ymm10,%ymm10 + vpclmulqdq $0x00,%ymm30,%ymm7,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm10 + vpclmulqdq $0x01,%ymm27,%ymm4,%ymm24 + + vaesenc %ymm17,%ymm0,%ymm0 + vaesenc %ymm17,%ymm1,%ymm1 + vaesenc %ymm17,%ymm2,%ymm2 + vaesenc %ymm17,%ymm3,%ymm3 + + vpclmulqdq $0x01,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x01,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm30,%ymm7,%ymm25 + + vaesenc %ymm18,%ymm0,%ymm0 + vaesenc %ymm18,%ymm1,%ymm1 + vaesenc %ymm18,%ymm2,%ymm2 + vaesenc %ymm18,%ymm3,%ymm3 + + vpclmulqdq $0x10,%ymm27,%ymm4,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x10,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x10,%ymm29,%ymm6,%ymm26 + + vaesenc %ymm19,%ymm0,%ymm0 + vaesenc %ymm19,%ymm1,%ymm1 + vaesenc %ymm19,%ymm2,%ymm2 + vaesenc %ymm19,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm10,%ymm31,%ymm26 + vpclmulqdq $0x10,%ymm30,%ymm7,%ymm25 + vpxord %ymm25,%ymm24,%ymm24 + + vaesenc %ymm20,%ymm0,%ymm0 + vaesenc %ymm20,%ymm1,%ymm1 + vaesenc %ymm20,%ymm2,%ymm2 + vaesenc %ymm20,%ymm3,%ymm3 + + vpshufd $0x4e,%ymm10,%ymm10 + vpclmulqdq $0x11,%ymm27,%ymm4,%ymm4 + vpclmulqdq $0x11,%ymm28,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm29,%ymm6,%ymm6 + + vaesenc %ymm21,%ymm0,%ymm0 + vaesenc %ymm21,%ymm1,%ymm1 + vaesenc %ymm21,%ymm2,%ymm2 + vaesenc 
%ymm21,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm10,%ymm24 + vpclmulqdq $0x11,%ymm30,%ymm7,%ymm7 + vpternlogd $0x96,%ymm6,%ymm5,%ymm4 + vpclmulqdq $0x01,%ymm24,%ymm31,%ymm25 + + vaesenc %ymm22,%ymm0,%ymm0 + vaesenc %ymm22,%ymm1,%ymm1 + vaesenc %ymm22,%ymm2,%ymm2 + vaesenc %ymm22,%ymm3,%ymm3 + + vpxord %ymm7,%ymm4,%ymm10 + vpshufd $0x4e,%ymm24,%ymm24 + vpternlogd $0x96,%ymm25,%ymm24,%ymm10 + + vaesenc %ymm23,%ymm0,%ymm0 + vaesenc %ymm23,%ymm1,%ymm1 + vaesenc %ymm23,%ymm2,%ymm2 + vaesenc %ymm23,%ymm3,%ymm3 + + vextracti32x4 $1,%ymm10,%xmm4 + vpxord %xmm4,%xmm10,%xmm10 + + + + + vpxord 0(%rdi),%ymm14,%ymm4 + vpxord 32(%rdi),%ymm14,%ymm5 + vpxord 64(%rdi),%ymm14,%ymm6 + vpxord 96(%rdi),%ymm14,%ymm7 + + + + vaesenclast %ymm4,%ymm0,%ymm4 + vaesenclast %ymm5,%ymm1,%ymm5 + vaesenclast %ymm6,%ymm2,%ymm6 + vaesenclast %ymm7,%ymm3,%ymm7 + + + vmovdqu8 %ymm4,0(%rsi) + vmovdqu8 %ymm5,32(%rsi) + vmovdqu8 %ymm6,64(%rsi) + vmovdqu8 %ymm7,96(%rsi) + + subq $-128,%rdi + subq $-128,%rsi + addq $-128,%rdx + cmpq $128-1,%rdx + ja L$crypt_loop_4x__func1 +L$ghash_last_ciphertext_4x__func1: + vpshufb %ymm8,%ymm4,%ymm4 + vpxord %ymm10,%ymm4,%ymm4 + vpshufb %ymm8,%ymm5,%ymm5 + vpshufb %ymm8,%ymm6,%ymm6 + vpshufb %ymm8,%ymm7,%ymm7 + vpclmulqdq $0x00,%ymm27,%ymm4,%ymm10 + vpclmulqdq $0x00,%ymm28,%ymm5,%ymm24 + vpclmulqdq $0x00,%ymm29,%ymm6,%ymm25 + vpxord %ymm24,%ymm10,%ymm10 + vpclmulqdq $0x00,%ymm30,%ymm7,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm10 + vpclmulqdq $0x01,%ymm27,%ymm4,%ymm24 + vpclmulqdq $0x01,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x01,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm30,%ymm7,%ymm25 + vpclmulqdq $0x10,%ymm27,%ymm4,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x10,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x10,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm10,%ymm31,%ymm26 + vpclmulqdq $0x10,%ymm30,%ymm7,%ymm25 + vpxord %ymm25,%ymm24,%ymm24 + vpshufd $0x4e,%ymm10,%ymm10 + vpclmulqdq 
$0x11,%ymm27,%ymm4,%ymm4 + vpclmulqdq $0x11,%ymm28,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm29,%ymm6,%ymm6 + vpternlogd $0x96,%ymm26,%ymm10,%ymm24 + vpclmulqdq $0x11,%ymm30,%ymm7,%ymm7 + vpternlogd $0x96,%ymm6,%ymm5,%ymm4 + vpclmulqdq $0x01,%ymm24,%ymm31,%ymm25 + vpxord %ymm7,%ymm4,%ymm10 + vpshufd $0x4e,%ymm24,%ymm24 + vpternlogd $0x96,%ymm25,%ymm24,%ymm10 + vextracti32x4 $1,%ymm10,%xmm4 + vpxord %xmm4,%xmm10,%xmm10 + +L$crypt_loop_4x_done__func1: + + testq %rdx,%rdx + jz L$done__func1 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $32,%rdx + jb L$partial_vec__func1 + +L$crypt_loop_1x__func1: + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_full_vec__func1: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_full_vec__func1 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1 + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi) + + + vmovdqu8 (%r8),%ymm30 + vpshufb %ymm8,%ymm0,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $32,%r8 + addq $32,%rdi + addq $32,%rsi + subq $32,%rdx + cmpq $32,%rdx + jae L$crypt_loop_1x__func1 + + testq %rdx,%rdx + jz L$reduce__func1 + +L$partial_vec__func1: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k2 + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_partialvec__func1: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc 
%ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_partialvec__func1 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1{%k1}{z} + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%ymm30{%k2}{z} + vmovdqu8 %ymm0,%ymm1{%k1}{z} + vpshufb %ymm8,%ymm1,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + +L$reduce__func1: + + vpclmulqdq $0x01,%ymm4,%ymm31,%ymm0 + vpshufd $0x4e,%ymm4,%ymm4 + vpternlogd $0x96,%ymm0,%ymm4,%ymm5 + vpclmulqdq $0x01,%ymm5,%ymm31,%ymm0 + vpshufd $0x4e,%ymm5,%ymm5 + vpternlogd $0x96,%ymm0,%ymm5,%ymm6 + + vextracti32x4 $1,%ymm6,%xmm0 + vpxord %xmm0,%xmm6,%xmm10 + + +L$done__func1: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 + + ret + + + +.globl _aes_gcm_dec_update_vaes_avx10_256 +.private_extern _aes_gcm_dec_update_vaes_avx10_256 + +.p2align 5 +_aes_gcm_dec_update_vaes_avx10_256: + + +_CET_ENDBR + pushq %r12 + + + movq 16(%rsp),%r12 + + vbroadcasti32x4 L$bswap_mask(%rip),%ymm8 + vbroadcasti32x4 L$gfpoly(%rip),%ymm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%ymm12 + vpshufb %ymm8,%ymm12,%ymm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%ymm13 + vbroadcasti32x4 (%r11),%ymm14 + + + vpaddd L$ctr_pattern(%rip),%ymm12,%ymm12 + + + vbroadcasti32x4 L$inc_2blocks(%rip),%ymm11 + + + + cmpq $128-1,%rdx + jbe L$crypt_loop_4x_done__func2 + + + vmovdqu8 256-128(%r9),%ymm27 + vmovdqu8 256-96(%r9),%ymm28 + vmovdqu8 256-64(%r9),%ymm29 + vmovdqu8 256-32(%r9),%ymm30 + vbroadcasti32x4 -144(%r11),%ymm15 + vbroadcasti32x4 -128(%r11),%ymm16 + vbroadcasti32x4 -112(%r11),%ymm17 + vbroadcasti32x4 -96(%r11),%ymm18 
+ vbroadcasti32x4 -80(%r11),%ymm19 + vbroadcasti32x4 -64(%r11),%ymm20 + vbroadcasti32x4 -48(%r11),%ymm21 + vbroadcasti32x4 -32(%r11),%ymm22 + vbroadcasti32x4 -16(%r11),%ymm23 +L$crypt_loop_4x__func2: + vmovdqu8 0(%rdi),%ymm4 + vmovdqu8 32(%rdi),%ymm5 + vmovdqu8 64(%rdi),%ymm6 + vmovdqu8 96(%rdi),%ymm7 + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm1 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm2 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm3 + vpaddd %ymm11,%ymm12,%ymm12 + + + vpxord %ymm13,%ymm0,%ymm0 + vpxord %ymm13,%ymm1,%ymm1 + vpxord %ymm13,%ymm2,%ymm2 + vpxord %ymm13,%ymm3,%ymm3 + + cmpl $24,%r10d + jl L$aes128__func2 + je L$aes192__func2 + + vbroadcasti32x4 -208(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -192(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +L$aes192__func2: + vbroadcasti32x4 -176(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -160(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +L$aes128__func2: + vpshufb %ymm8,%ymm4,%ymm4 + vpxord %ymm10,%ymm4,%ymm4 + vpshufb %ymm8,%ymm5,%ymm5 + vpshufb %ymm8,%ymm6,%ymm6 + + vaesenc %ymm15,%ymm0,%ymm0 + vaesenc %ymm15,%ymm1,%ymm1 + vaesenc %ymm15,%ymm2,%ymm2 + vaesenc %ymm15,%ymm3,%ymm3 + + vpshufb %ymm8,%ymm7,%ymm7 + vpclmulqdq $0x00,%ymm27,%ymm4,%ymm10 + vpclmulqdq $0x00,%ymm28,%ymm5,%ymm24 + vpclmulqdq $0x00,%ymm29,%ymm6,%ymm25 + + vaesenc %ymm16,%ymm0,%ymm0 + vaesenc %ymm16,%ymm1,%ymm1 + vaesenc %ymm16,%ymm2,%ymm2 + vaesenc %ymm16,%ymm3,%ymm3 + + vpxord %ymm24,%ymm10,%ymm10 + vpclmulqdq $0x00,%ymm30,%ymm7,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm10 + vpclmulqdq 
$0x01,%ymm27,%ymm4,%ymm24 + + vaesenc %ymm17,%ymm0,%ymm0 + vaesenc %ymm17,%ymm1,%ymm1 + vaesenc %ymm17,%ymm2,%ymm2 + vaesenc %ymm17,%ymm3,%ymm3 + + vpclmulqdq $0x01,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x01,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm30,%ymm7,%ymm25 + + vaesenc %ymm18,%ymm0,%ymm0 + vaesenc %ymm18,%ymm1,%ymm1 + vaesenc %ymm18,%ymm2,%ymm2 + vaesenc %ymm18,%ymm3,%ymm3 + + vpclmulqdq $0x10,%ymm27,%ymm4,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x10,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x10,%ymm29,%ymm6,%ymm26 + + vaesenc %ymm19,%ymm0,%ymm0 + vaesenc %ymm19,%ymm1,%ymm1 + vaesenc %ymm19,%ymm2,%ymm2 + vaesenc %ymm19,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm10,%ymm31,%ymm26 + vpclmulqdq $0x10,%ymm30,%ymm7,%ymm25 + vpxord %ymm25,%ymm24,%ymm24 + + vaesenc %ymm20,%ymm0,%ymm0 + vaesenc %ymm20,%ymm1,%ymm1 + vaesenc %ymm20,%ymm2,%ymm2 + vaesenc %ymm20,%ymm3,%ymm3 + + vpshufd $0x4e,%ymm10,%ymm10 + vpclmulqdq $0x11,%ymm27,%ymm4,%ymm4 + vpclmulqdq $0x11,%ymm28,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm29,%ymm6,%ymm6 + + vaesenc %ymm21,%ymm0,%ymm0 + vaesenc %ymm21,%ymm1,%ymm1 + vaesenc %ymm21,%ymm2,%ymm2 + vaesenc %ymm21,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm10,%ymm24 + vpclmulqdq $0x11,%ymm30,%ymm7,%ymm7 + vpternlogd $0x96,%ymm6,%ymm5,%ymm4 + vpclmulqdq $0x01,%ymm24,%ymm31,%ymm25 + + vaesenc %ymm22,%ymm0,%ymm0 + vaesenc %ymm22,%ymm1,%ymm1 + vaesenc %ymm22,%ymm2,%ymm2 + vaesenc %ymm22,%ymm3,%ymm3 + + vpxord %ymm7,%ymm4,%ymm10 + vpshufd $0x4e,%ymm24,%ymm24 + vpternlogd $0x96,%ymm25,%ymm24,%ymm10 + + vaesenc %ymm23,%ymm0,%ymm0 + vaesenc %ymm23,%ymm1,%ymm1 + vaesenc %ymm23,%ymm2,%ymm2 + vaesenc %ymm23,%ymm3,%ymm3 + + vextracti32x4 $1,%ymm10,%xmm4 + vpxord %xmm4,%xmm10,%xmm10 + + + + + vpxord 0(%rdi),%ymm14,%ymm4 + vpxord 32(%rdi),%ymm14,%ymm5 + vpxord 64(%rdi),%ymm14,%ymm6 + vpxord 96(%rdi),%ymm14,%ymm7 + + + + vaesenclast %ymm4,%ymm0,%ymm4 + vaesenclast %ymm5,%ymm1,%ymm5 + 
vaesenclast %ymm6,%ymm2,%ymm6 + vaesenclast %ymm7,%ymm3,%ymm7 + + + vmovdqu8 %ymm4,0(%rsi) + vmovdqu8 %ymm5,32(%rsi) + vmovdqu8 %ymm6,64(%rsi) + vmovdqu8 %ymm7,96(%rsi) + + subq $-128,%rdi + subq $-128,%rsi + addq $-128,%rdx + cmpq $128-1,%rdx + ja L$crypt_loop_4x__func2 +L$crypt_loop_4x_done__func2: + + testq %rdx,%rdx + jz L$done__func2 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $32,%rdx + jb L$partial_vec__func2 + +L$crypt_loop_1x__func2: + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_full_vec__func2: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_full_vec__func2 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1 + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi) + + + vmovdqu8 (%r8),%ymm30 + vpshufb %ymm8,%ymm1,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $32,%r8 + addq $32,%rdi + addq $32,%rsi + subq $32,%rdx + cmpq $32,%rdx + jae L$crypt_loop_1x__func2 + + testq %rdx,%rdx + jz L$reduce__func2 + +L$partial_vec__func2: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k2 + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_partialvec__func2: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_partialvec__func2 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 
(%rdi),%ymm1{%k1}{z} + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%ymm30{%k2}{z} + + vpshufb %ymm8,%ymm1,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + +L$reduce__func2: + + vpclmulqdq $0x01,%ymm4,%ymm31,%ymm0 + vpshufd $0x4e,%ymm4,%ymm4 + vpternlogd $0x96,%ymm0,%ymm4,%ymm5 + vpclmulqdq $0x01,%ymm5,%ymm31,%ymm0 + vpshufd $0x4e,%ymm5,%ymm5 + vpternlogd $0x96,%ymm0,%ymm5,%ymm6 + + vextracti32x4 $1,%ymm6,%xmm0 + vpxord %xmm0,%xmm6,%xmm10 + + +L$done__func2: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 + + ret + + + +.globl _gcm_ghash_vpclmulqdq_avx10_512 +.private_extern _gcm_ghash_vpclmulqdq_avx10_512 + +.p2align 5 +_gcm_ghash_vpclmulqdq_avx10_512: + + +_CET_ENDBR + + + + + + + vmovdqu L$bswap_mask(%rip),%xmm4 + vmovdqu L$gfpoly(%rip),%xmm10 + + + vmovdqu (%rdi),%xmm5 + vpshufb %xmm4,%xmm5,%xmm5 + + + cmpq $64,%rcx + jb L$aad_blockbyblock__func2 + + + + vshufi64x2 $0,%zmm4,%zmm4,%zmm4 + vshufi64x2 $0,%zmm10,%zmm10,%zmm10 + + + vmovdqu8 256-64(%rsi),%zmm9 + + cmpq $256-1,%rcx + jbe L$aad_loop_1x__func2 + + + vmovdqu8 256-256(%rsi),%zmm6 + vmovdqu8 256-192(%rsi),%zmm7 + vmovdqu8 256-128(%rsi),%zmm8 + + +L$aad_loop_4x__func2: + vmovdqu8 0(%rdx),%zmm0 + vmovdqu8 64(%rdx),%zmm1 + vmovdqu8 128(%rdx),%zmm2 + vmovdqu8 192(%rdx),%zmm3 + vpshufb %zmm4,%zmm0,%zmm0 + vpxord %zmm5,%zmm0,%zmm0 + vpshufb %zmm4,%zmm1,%zmm1 + vpshufb %zmm4,%zmm2,%zmm2 + vpshufb %zmm4,%zmm3,%zmm3 + vpclmulqdq $0x00,%zmm6,%zmm0,%zmm5 + vpclmulqdq $0x00,%zmm7,%zmm1,%zmm11 + vpclmulqdq $0x00,%zmm8,%zmm2,%zmm12 + vpxord %zmm11,%zmm5,%zmm5 + vpclmulqdq $0x00,%zmm9,%zmm3,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm5 + vpclmulqdq $0x01,%zmm6,%zmm0,%zmm11 + vpclmulqdq 
$0x01,%zmm7,%zmm1,%zmm12 + vpclmulqdq $0x01,%zmm8,%zmm2,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm11 + vpclmulqdq $0x01,%zmm9,%zmm3,%zmm12 + vpclmulqdq $0x10,%zmm6,%zmm0,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm11 + vpclmulqdq $0x10,%zmm7,%zmm1,%zmm12 + vpclmulqdq $0x10,%zmm8,%zmm2,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm11 + vpclmulqdq $0x01,%zmm5,%zmm10,%zmm13 + vpclmulqdq $0x10,%zmm9,%zmm3,%zmm12 + vpxord %zmm12,%zmm11,%zmm11 + vpshufd $0x4e,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm6,%zmm0,%zmm0 + vpclmulqdq $0x11,%zmm7,%zmm1,%zmm1 + vpclmulqdq $0x11,%zmm8,%zmm2,%zmm2 + vpternlogd $0x96,%zmm13,%zmm5,%zmm11 + vpclmulqdq $0x11,%zmm9,%zmm3,%zmm3 + vpternlogd $0x96,%zmm2,%zmm1,%zmm0 + vpclmulqdq $0x01,%zmm11,%zmm10,%zmm12 + vpxord %zmm3,%zmm0,%zmm5 + vpshufd $0x4e,%zmm11,%zmm11 + vpternlogd $0x96,%zmm12,%zmm11,%zmm5 + vextracti32x4 $1,%zmm5,%xmm0 + vextracti32x4 $2,%zmm5,%xmm1 + vextracti32x4 $3,%zmm5,%xmm2 + vpxord %xmm0,%xmm5,%xmm5 + vpternlogd $0x96,%xmm1,%xmm2,%xmm5 + + subq $-256,%rdx + addq $-256,%rcx + cmpq $256-1,%rcx + ja L$aad_loop_4x__func2 + + + cmpq $64,%rcx + jb L$aad_large_done__func2 +L$aad_loop_1x__func2: + vmovdqu8 (%rdx),%zmm0 + vpshufb %zmm4,%zmm0,%zmm0 + vpxord %zmm0,%zmm5,%zmm5 + vpclmulqdq $0x00,%zmm9,%zmm5,%zmm0 + vpclmulqdq $0x01,%zmm9,%zmm5,%zmm1 + vpclmulqdq $0x10,%zmm9,%zmm5,%zmm2 + vpxord %zmm2,%zmm1,%zmm1 + vpclmulqdq $0x01,%zmm0,%zmm10,%zmm2 + vpshufd $0x4e,%zmm0,%zmm0 + vpternlogd $0x96,%zmm2,%zmm0,%zmm1 + vpclmulqdq $0x11,%zmm9,%zmm5,%zmm5 + vpclmulqdq $0x01,%zmm1,%zmm10,%zmm0 + vpshufd $0x4e,%zmm1,%zmm1 + vpternlogd $0x96,%zmm0,%zmm1,%zmm5 + + vextracti32x4 $1,%zmm5,%xmm0 + vextracti32x4 $2,%zmm5,%xmm1 + vextracti32x4 $3,%zmm5,%xmm2 + vpxord %xmm0,%xmm5,%xmm5 + vpternlogd $0x96,%xmm1,%xmm2,%xmm5 + + addq $64,%rdx + subq $64,%rcx + cmpq $64,%rcx + jae L$aad_loop_1x__func2 + +L$aad_large_done__func2: + + + vzeroupper + + +L$aad_blockbyblock__func2: + testq %rcx,%rcx + jz L$aad_done__func2 + vmovdqu 256-16(%rsi),%xmm9 
+L$aad_loop_blockbyblock__func2: + vmovdqu (%rdx),%xmm0 + vpshufb %xmm4,%xmm0,%xmm0 + vpxor %xmm0,%xmm5,%xmm5 + vpclmulqdq $0x00,%xmm9,%xmm5,%xmm0 + vpclmulqdq $0x01,%xmm9,%xmm5,%xmm1 + vpclmulqdq $0x10,%xmm9,%xmm5,%xmm2 + vpxord %xmm2,%xmm1,%xmm1 + vpclmulqdq $0x01,%xmm0,%xmm10,%xmm2 + vpshufd $0x4e,%xmm0,%xmm0 + vpternlogd $0x96,%xmm2,%xmm0,%xmm1 + vpclmulqdq $0x11,%xmm9,%xmm5,%xmm5 + vpclmulqdq $0x01,%xmm1,%xmm10,%xmm0 + vpshufd $0x4e,%xmm1,%xmm1 + vpternlogd $0x96,%xmm0,%xmm1,%xmm5 + + addq $16,%rdx + subq $16,%rcx + jnz L$aad_loop_blockbyblock__func2 + +L$aad_done__func2: + + vpshufb %xmm4,%xmm5,%xmm5 + vmovdqu %xmm5,(%rdi) + ret + + + +.globl _aes_gcm_enc_update_vaes_avx10_512 +.private_extern _aes_gcm_enc_update_vaes_avx10_512 + +.p2align 5 +_aes_gcm_enc_update_vaes_avx10_512: + + +_CET_ENDBR + pushq %r12 + + + movq 16(%rsp),%r12 +#ifdef BORINGSSL_DISPATCH_TEST + + movb $1,_BORINGSSL_function_hit+7(%rip) +#endif + + vbroadcasti32x4 L$bswap_mask(%rip),%zmm8 + vbroadcasti32x4 L$gfpoly(%rip),%zmm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%zmm12 + vpshufb %zmm8,%zmm12,%zmm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%zmm13 + vbroadcasti32x4 (%r11),%zmm14 + + + vpaddd L$ctr_pattern(%rip),%zmm12,%zmm12 + + + vbroadcasti32x4 L$inc_4blocks(%rip),%zmm11 + + + + cmpq $256-1,%rdx + jbe L$crypt_loop_4x_done__func3 + + + vmovdqu8 256-256(%r9),%zmm27 + vmovdqu8 256-192(%r9),%zmm28 + vmovdqu8 256-128(%r9),%zmm29 + vmovdqu8 256-64(%r9),%zmm30 + + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm1 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm2 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm3 + vpaddd %zmm11,%zmm12,%zmm12 + + + vpxord %zmm13,%zmm0,%zmm0 + vpxord %zmm13,%zmm1,%zmm1 + vpxord %zmm13,%zmm2,%zmm2 + vpxord %zmm13,%zmm3,%zmm3 + + leaq 16(%rcx),%rax +L$vaesenc_loop_first_4_vecs__func3: + 
vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_first_4_vecs__func3 + + + + vpxord 0(%rdi),%zmm14,%zmm4 + vpxord 64(%rdi),%zmm14,%zmm5 + vpxord 128(%rdi),%zmm14,%zmm6 + vpxord 192(%rdi),%zmm14,%zmm7 + + + + vaesenclast %zmm4,%zmm0,%zmm4 + vaesenclast %zmm5,%zmm1,%zmm5 + vaesenclast %zmm6,%zmm2,%zmm6 + vaesenclast %zmm7,%zmm3,%zmm7 + + + vmovdqu8 %zmm4,0(%rsi) + vmovdqu8 %zmm5,64(%rsi) + vmovdqu8 %zmm6,128(%rsi) + vmovdqu8 %zmm7,192(%rsi) + + subq $-256,%rdi + subq $-256,%rsi + addq $-256,%rdx + cmpq $256-1,%rdx + jbe L$ghash_last_ciphertext_4x__func3 + vbroadcasti32x4 -144(%r11),%zmm15 + vbroadcasti32x4 -128(%r11),%zmm16 + vbroadcasti32x4 -112(%r11),%zmm17 + vbroadcasti32x4 -96(%r11),%zmm18 + vbroadcasti32x4 -80(%r11),%zmm19 + vbroadcasti32x4 -64(%r11),%zmm20 + vbroadcasti32x4 -48(%r11),%zmm21 + vbroadcasti32x4 -32(%r11),%zmm22 + vbroadcasti32x4 -16(%r11),%zmm23 +L$crypt_loop_4x__func3: + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm1 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm2 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm3 + vpaddd %zmm11,%zmm12,%zmm12 + + + vpxord %zmm13,%zmm0,%zmm0 + vpxord %zmm13,%zmm1,%zmm1 + vpxord %zmm13,%zmm2,%zmm2 + vpxord %zmm13,%zmm3,%zmm3 + + cmpl $24,%r10d + jl L$aes128__func3 + je L$aes192__func3 + + vbroadcasti32x4 -208(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -192(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +L$aes192__func3: + vbroadcasti32x4 -176(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -160(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + 
vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +L$aes128__func3: + vpshufb %zmm8,%zmm4,%zmm4 + vpxord %zmm10,%zmm4,%zmm4 + vpshufb %zmm8,%zmm5,%zmm5 + vpshufb %zmm8,%zmm6,%zmm6 + + vaesenc %zmm15,%zmm0,%zmm0 + vaesenc %zmm15,%zmm1,%zmm1 + vaesenc %zmm15,%zmm2,%zmm2 + vaesenc %zmm15,%zmm3,%zmm3 + + vpshufb %zmm8,%zmm7,%zmm7 + vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 + vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 + vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 + + vaesenc %zmm16,%zmm0,%zmm0 + vaesenc %zmm16,%zmm1,%zmm1 + vaesenc %zmm16,%zmm2,%zmm2 + vaesenc %zmm16,%zmm3,%zmm3 + + vpxord %zmm24,%zmm10,%zmm10 + vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm10 + vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 + + vaesenc %zmm17,%zmm0,%zmm0 + vaesenc %zmm17,%zmm1,%zmm1 + vaesenc %zmm17,%zmm2,%zmm2 + vaesenc %zmm17,%zmm3,%zmm3 + + vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 + + vaesenc %zmm18,%zmm0,%zmm0 + vaesenc %zmm18,%zmm1,%zmm1 + vaesenc %zmm18,%zmm2,%zmm2 + vaesenc %zmm18,%zmm3,%zmm3 + + vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 + + vaesenc %zmm19,%zmm0,%zmm0 + vaesenc %zmm19,%zmm1,%zmm1 + vaesenc %zmm19,%zmm2,%zmm2 + vaesenc %zmm19,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 + vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 + vpxord %zmm25,%zmm24,%zmm24 + + vaesenc %zmm20,%zmm0,%zmm0 + vaesenc %zmm20,%zmm1,%zmm1 + vaesenc %zmm20,%zmm2,%zmm2 + vaesenc %zmm20,%zmm3,%zmm3 + + vpshufd $0x4e,%zmm10,%zmm10 + vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 + vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 + + vaesenc %zmm21,%zmm0,%zmm0 + vaesenc %zmm21,%zmm1,%zmm1 + vaesenc %zmm21,%zmm2,%zmm2 + vaesenc %zmm21,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm10,%zmm24 + 
vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 + vpternlogd $0x96,%zmm6,%zmm5,%zmm4 + vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 + + vaesenc %zmm22,%zmm0,%zmm0 + vaesenc %zmm22,%zmm1,%zmm1 + vaesenc %zmm22,%zmm2,%zmm2 + vaesenc %zmm22,%zmm3,%zmm3 + + vpxord %zmm7,%zmm4,%zmm10 + vpshufd $0x4e,%zmm24,%zmm24 + vpternlogd $0x96,%zmm25,%zmm24,%zmm10 + + vaesenc %zmm23,%zmm0,%zmm0 + vaesenc %zmm23,%zmm1,%zmm1 + vaesenc %zmm23,%zmm2,%zmm2 + vaesenc %zmm23,%zmm3,%zmm3 + + vextracti32x4 $1,%zmm10,%xmm4 + vextracti32x4 $2,%zmm10,%xmm5 + vextracti32x4 $3,%zmm10,%xmm6 + vpxord %xmm4,%xmm10,%xmm10 + vpternlogd $0x96,%xmm5,%xmm6,%xmm10 + + + + + vpxord 0(%rdi),%zmm14,%zmm4 + vpxord 64(%rdi),%zmm14,%zmm5 + vpxord 128(%rdi),%zmm14,%zmm6 + vpxord 192(%rdi),%zmm14,%zmm7 + + + + vaesenclast %zmm4,%zmm0,%zmm4 + vaesenclast %zmm5,%zmm1,%zmm5 + vaesenclast %zmm6,%zmm2,%zmm6 + vaesenclast %zmm7,%zmm3,%zmm7 + + + vmovdqu8 %zmm4,0(%rsi) + vmovdqu8 %zmm5,64(%rsi) + vmovdqu8 %zmm6,128(%rsi) + vmovdqu8 %zmm7,192(%rsi) + + subq $-256,%rdi + subq $-256,%rsi + addq $-256,%rdx + cmpq $256-1,%rdx + ja L$crypt_loop_4x__func3 +L$ghash_last_ciphertext_4x__func3: + vpshufb %zmm8,%zmm4,%zmm4 + vpxord %zmm10,%zmm4,%zmm4 + vpshufb %zmm8,%zmm5,%zmm5 + vpshufb %zmm8,%zmm6,%zmm6 + vpshufb %zmm8,%zmm7,%zmm7 + vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 + vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 + vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 + vpxord %zmm24,%zmm10,%zmm10 + vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm10 + vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 + vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 + vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 + vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 + vpxord %zmm25,%zmm24,%zmm24 + 
vpshufd $0x4e,%zmm10,%zmm10 + vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 + vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 + vpternlogd $0x96,%zmm26,%zmm10,%zmm24 + vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 + vpternlogd $0x96,%zmm6,%zmm5,%zmm4 + vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 + vpxord %zmm7,%zmm4,%zmm10 + vpshufd $0x4e,%zmm24,%zmm24 + vpternlogd $0x96,%zmm25,%zmm24,%zmm10 + vextracti32x4 $1,%zmm10,%xmm4 + vextracti32x4 $2,%zmm10,%xmm5 + vextracti32x4 $3,%zmm10,%xmm6 + vpxord %xmm4,%xmm10,%xmm10 + vpternlogd $0x96,%xmm5,%xmm6,%xmm10 + +L$crypt_loop_4x_done__func3: + + testq %rdx,%rdx + jz L$done__func3 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $64,%rdx + jb L$partial_vec__func3 + +L$crypt_loop_1x__func3: + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_full_vec__func3: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_full_vec__func3 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1 + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi) + + + vmovdqu8 (%r8),%zmm30 + vpshufb %zmm8,%zmm0,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $64,%r8 + addq $64,%rdi + addq $64,%rsi + subq $64,%rdx + cmpq $64,%rdx + jae L$crypt_loop_1x__func3 + + testq %rdx,%rdx + jz L$reduce__func3 + +L$partial_vec__func3: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovq %rax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovq %rax,%k2 + + + + vpshufb 
%zmm8,%zmm12,%zmm0 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_partialvec__func3: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_partialvec__func3 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1{%k1}{z} + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%zmm30{%k2}{z} + vmovdqu8 %zmm0,%zmm1{%k1}{z} + vpshufb %zmm8,%zmm1,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + +L$reduce__func3: + + vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 + vpshufd $0x4e,%zmm4,%zmm4 + vpternlogd $0x96,%zmm0,%zmm4,%zmm5 + vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 + vpshufd $0x4e,%zmm5,%zmm5 + vpternlogd $0x96,%zmm0,%zmm5,%zmm6 + + vextracti32x4 $1,%zmm6,%xmm0 + vextracti32x4 $2,%zmm6,%xmm1 + vextracti32x4 $3,%zmm6,%xmm2 + vpxord %xmm0,%xmm6,%xmm10 + vpternlogd $0x96,%xmm1,%xmm2,%xmm10 + + +L$done__func3: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 + + ret + + + +.globl _aes_gcm_dec_update_vaes_avx10_512 +.private_extern _aes_gcm_dec_update_vaes_avx10_512 + +.p2align 5 +_aes_gcm_dec_update_vaes_avx10_512: + + +_CET_ENDBR + pushq %r12 + + + movq 16(%rsp),%r12 + + vbroadcasti32x4 L$bswap_mask(%rip),%zmm8 + vbroadcasti32x4 L$gfpoly(%rip),%zmm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%zmm12 + vpshufb %zmm8,%zmm12,%zmm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%zmm13 + vbroadcasti32x4 (%r11),%zmm14 + + + vpaddd L$ctr_pattern(%rip),%zmm12,%zmm12 + + + vbroadcasti32x4 L$inc_4blocks(%rip),%zmm11 + + + + cmpq $256-1,%rdx + jbe L$crypt_loop_4x_done__func4 + + + vmovdqu8 
256-256(%r9),%zmm27 + vmovdqu8 256-192(%r9),%zmm28 + vmovdqu8 256-128(%r9),%zmm29 + vmovdqu8 256-64(%r9),%zmm30 + vbroadcasti32x4 -144(%r11),%zmm15 + vbroadcasti32x4 -128(%r11),%zmm16 + vbroadcasti32x4 -112(%r11),%zmm17 + vbroadcasti32x4 -96(%r11),%zmm18 + vbroadcasti32x4 -80(%r11),%zmm19 + vbroadcasti32x4 -64(%r11),%zmm20 + vbroadcasti32x4 -48(%r11),%zmm21 + vbroadcasti32x4 -32(%r11),%zmm22 + vbroadcasti32x4 -16(%r11),%zmm23 +L$crypt_loop_4x__func4: + vmovdqu8 0(%rdi),%zmm4 + vmovdqu8 64(%rdi),%zmm5 + vmovdqu8 128(%rdi),%zmm6 + vmovdqu8 192(%rdi),%zmm7 + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm1 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm2 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm3 + vpaddd %zmm11,%zmm12,%zmm12 + + + vpxord %zmm13,%zmm0,%zmm0 + vpxord %zmm13,%zmm1,%zmm1 + vpxord %zmm13,%zmm2,%zmm2 + vpxord %zmm13,%zmm3,%zmm3 + + cmpl $24,%r10d + jl L$aes128__func4 + je L$aes192__func4 + + vbroadcasti32x4 -208(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -192(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +L$aes192__func4: + vbroadcasti32x4 -176(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -160(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +L$aes128__func4: + vpshufb %zmm8,%zmm4,%zmm4 + vpxord %zmm10,%zmm4,%zmm4 + vpshufb %zmm8,%zmm5,%zmm5 + vpshufb %zmm8,%zmm6,%zmm6 + + vaesenc %zmm15,%zmm0,%zmm0 + vaesenc %zmm15,%zmm1,%zmm1 + vaesenc %zmm15,%zmm2,%zmm2 + vaesenc %zmm15,%zmm3,%zmm3 + + vpshufb %zmm8,%zmm7,%zmm7 + vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 + vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 + vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 + + 
vaesenc %zmm16,%zmm0,%zmm0 + vaesenc %zmm16,%zmm1,%zmm1 + vaesenc %zmm16,%zmm2,%zmm2 + vaesenc %zmm16,%zmm3,%zmm3 + + vpxord %zmm24,%zmm10,%zmm10 + vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm10 + vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 + + vaesenc %zmm17,%zmm0,%zmm0 + vaesenc %zmm17,%zmm1,%zmm1 + vaesenc %zmm17,%zmm2,%zmm2 + vaesenc %zmm17,%zmm3,%zmm3 + + vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 + + vaesenc %zmm18,%zmm0,%zmm0 + vaesenc %zmm18,%zmm1,%zmm1 + vaesenc %zmm18,%zmm2,%zmm2 + vaesenc %zmm18,%zmm3,%zmm3 + + vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 + + vaesenc %zmm19,%zmm0,%zmm0 + vaesenc %zmm19,%zmm1,%zmm1 + vaesenc %zmm19,%zmm2,%zmm2 + vaesenc %zmm19,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 + vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 + vpxord %zmm25,%zmm24,%zmm24 + + vaesenc %zmm20,%zmm0,%zmm0 + vaesenc %zmm20,%zmm1,%zmm1 + vaesenc %zmm20,%zmm2,%zmm2 + vaesenc %zmm20,%zmm3,%zmm3 + + vpshufd $0x4e,%zmm10,%zmm10 + vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 + vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 + + vaesenc %zmm21,%zmm0,%zmm0 + vaesenc %zmm21,%zmm1,%zmm1 + vaesenc %zmm21,%zmm2,%zmm2 + vaesenc %zmm21,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm10,%zmm24 + vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 + vpternlogd $0x96,%zmm6,%zmm5,%zmm4 + vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 + + vaesenc %zmm22,%zmm0,%zmm0 + vaesenc %zmm22,%zmm1,%zmm1 + vaesenc %zmm22,%zmm2,%zmm2 + vaesenc %zmm22,%zmm3,%zmm3 + + vpxord %zmm7,%zmm4,%zmm10 + vpshufd $0x4e,%zmm24,%zmm24 + vpternlogd $0x96,%zmm25,%zmm24,%zmm10 + + vaesenc %zmm23,%zmm0,%zmm0 + vaesenc %zmm23,%zmm1,%zmm1 + vaesenc %zmm23,%zmm2,%zmm2 + vaesenc %zmm23,%zmm3,%zmm3 + + vextracti32x4 $1,%zmm10,%xmm4 + 
vextracti32x4 $2,%zmm10,%xmm5 + vextracti32x4 $3,%zmm10,%xmm6 + vpxord %xmm4,%xmm10,%xmm10 + vpternlogd $0x96,%xmm5,%xmm6,%xmm10 + + + + + vpxord 0(%rdi),%zmm14,%zmm4 + vpxord 64(%rdi),%zmm14,%zmm5 + vpxord 128(%rdi),%zmm14,%zmm6 + vpxord 192(%rdi),%zmm14,%zmm7 + + + + vaesenclast %zmm4,%zmm0,%zmm4 + vaesenclast %zmm5,%zmm1,%zmm5 + vaesenclast %zmm6,%zmm2,%zmm6 + vaesenclast %zmm7,%zmm3,%zmm7 + + + vmovdqu8 %zmm4,0(%rsi) + vmovdqu8 %zmm5,64(%rsi) + vmovdqu8 %zmm6,128(%rsi) + vmovdqu8 %zmm7,192(%rsi) + + subq $-256,%rdi + subq $-256,%rsi + addq $-256,%rdx + cmpq $256-1,%rdx + ja L$crypt_loop_4x__func4 +L$crypt_loop_4x_done__func4: + + testq %rdx,%rdx + jz L$done__func4 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $64,%rdx + jb L$partial_vec__func4 + +L$crypt_loop_1x__func4: + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_full_vec__func4: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_full_vec__func4 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1 + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi) + + + vmovdqu8 (%r8),%zmm30 + vpshufb %zmm8,%zmm1,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $64,%r8 + addq $64,%rdi + addq $64,%rsi + subq $64,%rdx + cmpq $64,%rdx + jae L$crypt_loop_1x__func4 + + testq %rdx,%rdx + jz L$reduce__func4 + +L$partial_vec__func4: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovq %rax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq 
%rdx,%rax,%rax + kmovq %rax,%k2 + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +L$vaesenc_loop_tail_partialvec__func4: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne L$vaesenc_loop_tail_partialvec__func4 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1{%k1}{z} + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%zmm30{%k2}{z} + + vpshufb %zmm8,%zmm1,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + +L$reduce__func4: + + vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 + vpshufd $0x4e,%zmm4,%zmm4 + vpternlogd $0x96,%zmm0,%zmm4,%zmm5 + vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 + vpshufd $0x4e,%zmm5,%zmm5 + vpternlogd $0x96,%zmm0,%zmm5,%zmm6 + + vextracti32x4 $1,%zmm6,%xmm0 + vextracti32x4 $2,%zmm6,%xmm1 + vextracti32x4 $3,%zmm6,%xmm2 + vpxord %xmm0,%xmm6,%xmm10 + vpternlogd $0x96,%xmm1,%xmm2,%xmm10 + + +L$done__func4: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 + + ret + + + +#endif +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif + diff --git a/Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-linux.S b/Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-linux.S new file mode 100644 index 00000000..df22c7a9 --- /dev/null +++ b/Sources/CCryptoBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-linux.S @@ -0,0 +1,2279 @@ +#define BORINGSSL_PREFIX CCryptoBoringSSL +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
+ +#include + +#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) +.section .rodata +.align 64 + + +.Lbswap_mask: +.quad 0x08090a0b0c0d0e0f, 0x0001020304050607 + + + + + + + + +.Lgfpoly: +.quad 1, 0xc200000000000000 + + +.Lgfpoly_and_internal_carrybit: +.quad 1, 0xc200000000000001 + + + + + +.Lctr_pattern: +.quad 0, 0 +.quad 1, 0 +.Linc_2blocks: +.quad 2, 0 +.quad 3, 0 +.Linc_4blocks: +.quad 4, 0 + +.text +.globl gcm_gmult_vpclmulqdq_avx10 +.hidden gcm_gmult_vpclmulqdq_avx10 +.type gcm_gmult_vpclmulqdq_avx10,@function +.align 32 +gcm_gmult_vpclmulqdq_avx10: +.cfi_startproc + +_CET_ENDBR + + + + vmovdqu (%rdi),%xmm0 + vmovdqu .Lbswap_mask(%rip),%xmm1 + vmovdqu 256-16(%rsi),%xmm2 + vmovdqu .Lgfpoly(%rip),%xmm3 + vpshufb %xmm1,%xmm0,%xmm0 + + vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4 + vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5 + vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6 + vpxord %xmm6,%xmm5,%xmm5 + vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6 + vpshufd $0x4e,%xmm4,%xmm4 + vpternlogd $0x96,%xmm6,%xmm4,%xmm5 + vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0 + vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4 + vpshufd $0x4e,%xmm5,%xmm5 + vpternlogd $0x96,%xmm4,%xmm5,%xmm0 + + + vpshufb %xmm1,%xmm0,%xmm0 + vmovdqu %xmm0,(%rdi) + ret + +.cfi_endproc +.size gcm_gmult_vpclmulqdq_avx10, . 
- gcm_gmult_vpclmulqdq_avx10 +.globl gcm_init_vpclmulqdq_avx10 +.hidden gcm_init_vpclmulqdq_avx10 +.type gcm_init_vpclmulqdq_avx10,@function +.align 32 +gcm_init_vpclmulqdq_avx10: +.cfi_startproc + +_CET_ENDBR + + leaq 256-32(%rdi),%r8 + + + + vpshufd $0x4e,(%rsi),%xmm3 + + + + + + + + + + + + + + + + + vpshufd $0xd3,%xmm3,%xmm0 + vpsrad $31,%xmm0,%xmm0 + vpaddq %xmm3,%xmm3,%xmm3 + + vpternlogd $0x78,.Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm3 + + + vbroadcasti32x4 .Lgfpoly(%rip),%ymm5 + + + + + + + + + vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 + vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 + vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 + vpxord %xmm2,%xmm1,%xmm1 + vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 + vpshufd $0x4e,%xmm0,%xmm0 + vpternlogd $0x96,%xmm2,%xmm0,%xmm1 + vpclmulqdq $0x11,%xmm3,%xmm3,%xmm4 + vpclmulqdq $0x01,%xmm1,%xmm5,%xmm0 + vpshufd $0x4e,%xmm1,%xmm1 + vpternlogd $0x96,%xmm0,%xmm1,%xmm4 + + + + vinserti128 $1,%xmm3,%ymm4,%ymm3 + vinserti128 $1,%xmm4,%ymm4,%ymm4 + + vmovdqu8 %ymm3,(%r8) + + + + + + movl $7,%eax +.Lprecompute_next__func1: + subq $32,%r8 + vpclmulqdq $0x00,%ymm4,%ymm3,%ymm0 + vpclmulqdq $0x01,%ymm4,%ymm3,%ymm1 + vpclmulqdq $0x10,%ymm4,%ymm3,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2 + vpshufd $0x4e,%ymm0,%ymm0 + vpternlogd $0x96,%ymm2,%ymm0,%ymm1 + vpclmulqdq $0x11,%ymm4,%ymm3,%ymm3 + vpclmulqdq $0x01,%ymm1,%ymm5,%ymm0 + vpshufd $0x4e,%ymm1,%ymm1 + vpternlogd $0x96,%ymm0,%ymm1,%ymm3 + + vmovdqu8 %ymm3,(%r8) + decl %eax + jnz .Lprecompute_next__func1 + + vzeroupper + ret + +.cfi_endproc +.size gcm_init_vpclmulqdq_avx10, . 
- gcm_init_vpclmulqdq_avx10 +.globl gcm_ghash_vpclmulqdq_avx10_256 +.hidden gcm_ghash_vpclmulqdq_avx10_256 +.type gcm_ghash_vpclmulqdq_avx10_256,@function +.align 32 +gcm_ghash_vpclmulqdq_avx10_256: +.cfi_startproc + +_CET_ENDBR + + + + + + + vmovdqu .Lbswap_mask(%rip),%xmm4 + vmovdqu .Lgfpoly(%rip),%xmm10 + + + vmovdqu (%rdi),%xmm5 + vpshufb %xmm4,%xmm5,%xmm5 + + + cmpq $32,%rcx + jb .Laad_blockbyblock__func1 + + + + vshufi64x2 $0,%ymm4,%ymm4,%ymm4 + vshufi64x2 $0,%ymm10,%ymm10,%ymm10 + + + vmovdqu8 256-32(%rsi),%ymm9 + + cmpq $128-1,%rcx + jbe .Laad_loop_1x__func1 + + + vmovdqu8 256-128(%rsi),%ymm6 + vmovdqu8 256-96(%rsi),%ymm7 + vmovdqu8 256-64(%rsi),%ymm8 + + +.Laad_loop_4x__func1: + vmovdqu8 0(%rdx),%ymm0 + vmovdqu8 32(%rdx),%ymm1 + vmovdqu8 64(%rdx),%ymm2 + vmovdqu8 96(%rdx),%ymm3 + vpshufb %ymm4,%ymm0,%ymm0 + vpxord %ymm5,%ymm0,%ymm0 + vpshufb %ymm4,%ymm1,%ymm1 + vpshufb %ymm4,%ymm2,%ymm2 + vpshufb %ymm4,%ymm3,%ymm3 + vpclmulqdq $0x00,%ymm6,%ymm0,%ymm5 + vpclmulqdq $0x00,%ymm7,%ymm1,%ymm11 + vpclmulqdq $0x00,%ymm8,%ymm2,%ymm12 + vpxord %ymm11,%ymm5,%ymm5 + vpclmulqdq $0x00,%ymm9,%ymm3,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm5 + vpclmulqdq $0x01,%ymm6,%ymm0,%ymm11 + vpclmulqdq $0x01,%ymm7,%ymm1,%ymm12 + vpclmulqdq $0x01,%ymm8,%ymm2,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm11 + vpclmulqdq $0x01,%ymm9,%ymm3,%ymm12 + vpclmulqdq $0x10,%ymm6,%ymm0,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm11 + vpclmulqdq $0x10,%ymm7,%ymm1,%ymm12 + vpclmulqdq $0x10,%ymm8,%ymm2,%ymm13 + vpternlogd $0x96,%ymm13,%ymm12,%ymm11 + vpclmulqdq $0x01,%ymm5,%ymm10,%ymm13 + vpclmulqdq $0x10,%ymm9,%ymm3,%ymm12 + vpxord %ymm12,%ymm11,%ymm11 + vpshufd $0x4e,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm6,%ymm0,%ymm0 + vpclmulqdq $0x11,%ymm7,%ymm1,%ymm1 + vpclmulqdq $0x11,%ymm8,%ymm2,%ymm2 + vpternlogd $0x96,%ymm13,%ymm5,%ymm11 + vpclmulqdq $0x11,%ymm9,%ymm3,%ymm3 + vpternlogd $0x96,%ymm2,%ymm1,%ymm0 + vpclmulqdq $0x01,%ymm11,%ymm10,%ymm12 + vpxord %ymm3,%ymm0,%ymm5 + vpshufd $0x4e,%ymm11,%ymm11 + 
vpternlogd $0x96,%ymm12,%ymm11,%ymm5 + vextracti32x4 $1,%ymm5,%xmm0 + vpxord %xmm0,%xmm5,%xmm5 + + subq $-128,%rdx + addq $-128,%rcx + cmpq $128-1,%rcx + ja .Laad_loop_4x__func1 + + + cmpq $32,%rcx + jb .Laad_large_done__func1 +.Laad_loop_1x__func1: + vmovdqu8 (%rdx),%ymm0 + vpshufb %ymm4,%ymm0,%ymm0 + vpxord %ymm0,%ymm5,%ymm5 + vpclmulqdq $0x00,%ymm9,%ymm5,%ymm0 + vpclmulqdq $0x01,%ymm9,%ymm5,%ymm1 + vpclmulqdq $0x10,%ymm9,%ymm5,%ymm2 + vpxord %ymm2,%ymm1,%ymm1 + vpclmulqdq $0x01,%ymm0,%ymm10,%ymm2 + vpshufd $0x4e,%ymm0,%ymm0 + vpternlogd $0x96,%ymm2,%ymm0,%ymm1 + vpclmulqdq $0x11,%ymm9,%ymm5,%ymm5 + vpclmulqdq $0x01,%ymm1,%ymm10,%ymm0 + vpshufd $0x4e,%ymm1,%ymm1 + vpternlogd $0x96,%ymm0,%ymm1,%ymm5 + + vextracti32x4 $1,%ymm5,%xmm0 + vpxord %xmm0,%xmm5,%xmm5 + + addq $32,%rdx + subq $32,%rcx + cmpq $32,%rcx + jae .Laad_loop_1x__func1 + +.Laad_large_done__func1: + + + vzeroupper + + +.Laad_blockbyblock__func1: + testq %rcx,%rcx + jz .Laad_done__func1 + vmovdqu 256-16(%rsi),%xmm9 +.Laad_loop_blockbyblock__func1: + vmovdqu (%rdx),%xmm0 + vpshufb %xmm4,%xmm0,%xmm0 + vpxor %xmm0,%xmm5,%xmm5 + vpclmulqdq $0x00,%xmm9,%xmm5,%xmm0 + vpclmulqdq $0x01,%xmm9,%xmm5,%xmm1 + vpclmulqdq $0x10,%xmm9,%xmm5,%xmm2 + vpxord %xmm2,%xmm1,%xmm1 + vpclmulqdq $0x01,%xmm0,%xmm10,%xmm2 + vpshufd $0x4e,%xmm0,%xmm0 + vpternlogd $0x96,%xmm2,%xmm0,%xmm1 + vpclmulqdq $0x11,%xmm9,%xmm5,%xmm5 + vpclmulqdq $0x01,%xmm1,%xmm10,%xmm0 + vpshufd $0x4e,%xmm1,%xmm1 + vpternlogd $0x96,%xmm0,%xmm1,%xmm5 + + addq $16,%rdx + subq $16,%rcx + jnz .Laad_loop_blockbyblock__func1 + +.Laad_done__func1: + + vpshufb %xmm4,%xmm5,%xmm5 + vmovdqu %xmm5,(%rdi) + ret + +.cfi_endproc +.size gcm_ghash_vpclmulqdq_avx10_256, . 
- gcm_ghash_vpclmulqdq_avx10_256 +.globl aes_gcm_enc_update_vaes_avx10_256 +.hidden aes_gcm_enc_update_vaes_avx10_256 +.type aes_gcm_enc_update_vaes_avx10_256,@function +.align 32 +aes_gcm_enc_update_vaes_avx10_256: +.cfi_startproc + +_CET_ENDBR + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-16 + + movq 16(%rsp),%r12 +#ifdef BORINGSSL_DISPATCH_TEST +.extern BORINGSSL_function_hit +.hidden BORINGSSL_function_hit + movb $1,BORINGSSL_function_hit+6(%rip) +#endif + + vbroadcasti32x4 .Lbswap_mask(%rip),%ymm8 + vbroadcasti32x4 .Lgfpoly(%rip),%ymm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%ymm12 + vpshufb %ymm8,%ymm12,%ymm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%ymm13 + vbroadcasti32x4 (%r11),%ymm14 + + + vpaddd .Lctr_pattern(%rip),%ymm12,%ymm12 + + + vbroadcasti32x4 .Linc_2blocks(%rip),%ymm11 + + + + cmpq $128-1,%rdx + jbe .Lcrypt_loop_4x_done__func1 + + + vmovdqu8 256-128(%r9),%ymm27 + vmovdqu8 256-96(%r9),%ymm28 + vmovdqu8 256-64(%r9),%ymm29 + vmovdqu8 256-32(%r9),%ymm30 + + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm1 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm2 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm3 + vpaddd %ymm11,%ymm12,%ymm12 + + + vpxord %ymm13,%ymm0,%ymm0 + vpxord %ymm13,%ymm1,%ymm1 + vpxord %ymm13,%ymm2,%ymm2 + vpxord %ymm13,%ymm3,%ymm3 + + leaq 16(%rcx),%rax +.Lvaesenc_loop_first_4_vecs__func1: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_first_4_vecs__func1 + + + + vpxord 0(%rdi),%ymm14,%ymm4 + vpxord 32(%rdi),%ymm14,%ymm5 + vpxord 64(%rdi),%ymm14,%ymm6 + vpxord 96(%rdi),%ymm14,%ymm7 + + + + vaesenclast %ymm4,%ymm0,%ymm4 + vaesenclast %ymm5,%ymm1,%ymm5 + vaesenclast %ymm6,%ymm2,%ymm6 + vaesenclast 
%ymm7,%ymm3,%ymm7 + + + vmovdqu8 %ymm4,0(%rsi) + vmovdqu8 %ymm5,32(%rsi) + vmovdqu8 %ymm6,64(%rsi) + vmovdqu8 %ymm7,96(%rsi) + + subq $-128,%rdi + subq $-128,%rsi + addq $-128,%rdx + cmpq $128-1,%rdx + jbe .Lghash_last_ciphertext_4x__func1 + vbroadcasti32x4 -144(%r11),%ymm15 + vbroadcasti32x4 -128(%r11),%ymm16 + vbroadcasti32x4 -112(%r11),%ymm17 + vbroadcasti32x4 -96(%r11),%ymm18 + vbroadcasti32x4 -80(%r11),%ymm19 + vbroadcasti32x4 -64(%r11),%ymm20 + vbroadcasti32x4 -48(%r11),%ymm21 + vbroadcasti32x4 -32(%r11),%ymm22 + vbroadcasti32x4 -16(%r11),%ymm23 +.Lcrypt_loop_4x__func1: + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm1 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm2 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm3 + vpaddd %ymm11,%ymm12,%ymm12 + + + vpxord %ymm13,%ymm0,%ymm0 + vpxord %ymm13,%ymm1,%ymm1 + vpxord %ymm13,%ymm2,%ymm2 + vpxord %ymm13,%ymm3,%ymm3 + + cmpl $24,%r10d + jl .Laes128__func1 + je .Laes192__func1 + + vbroadcasti32x4 -208(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -192(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +.Laes192__func1: + vbroadcasti32x4 -176(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -160(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +.Laes128__func1: + vpshufb %ymm8,%ymm4,%ymm4 + vpxord %ymm10,%ymm4,%ymm4 + vpshufb %ymm8,%ymm5,%ymm5 + vpshufb %ymm8,%ymm6,%ymm6 + + vaesenc %ymm15,%ymm0,%ymm0 + vaesenc %ymm15,%ymm1,%ymm1 + vaesenc %ymm15,%ymm2,%ymm2 + vaesenc %ymm15,%ymm3,%ymm3 + + vpshufb %ymm8,%ymm7,%ymm7 + vpclmulqdq $0x00,%ymm27,%ymm4,%ymm10 + vpclmulqdq $0x00,%ymm28,%ymm5,%ymm24 + vpclmulqdq 
$0x00,%ymm29,%ymm6,%ymm25 + + vaesenc %ymm16,%ymm0,%ymm0 + vaesenc %ymm16,%ymm1,%ymm1 + vaesenc %ymm16,%ymm2,%ymm2 + vaesenc %ymm16,%ymm3,%ymm3 + + vpxord %ymm24,%ymm10,%ymm10 + vpclmulqdq $0x00,%ymm30,%ymm7,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm10 + vpclmulqdq $0x01,%ymm27,%ymm4,%ymm24 + + vaesenc %ymm17,%ymm0,%ymm0 + vaesenc %ymm17,%ymm1,%ymm1 + vaesenc %ymm17,%ymm2,%ymm2 + vaesenc %ymm17,%ymm3,%ymm3 + + vpclmulqdq $0x01,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x01,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm30,%ymm7,%ymm25 + + vaesenc %ymm18,%ymm0,%ymm0 + vaesenc %ymm18,%ymm1,%ymm1 + vaesenc %ymm18,%ymm2,%ymm2 + vaesenc %ymm18,%ymm3,%ymm3 + + vpclmulqdq $0x10,%ymm27,%ymm4,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x10,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x10,%ymm29,%ymm6,%ymm26 + + vaesenc %ymm19,%ymm0,%ymm0 + vaesenc %ymm19,%ymm1,%ymm1 + vaesenc %ymm19,%ymm2,%ymm2 + vaesenc %ymm19,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm10,%ymm31,%ymm26 + vpclmulqdq $0x10,%ymm30,%ymm7,%ymm25 + vpxord %ymm25,%ymm24,%ymm24 + + vaesenc %ymm20,%ymm0,%ymm0 + vaesenc %ymm20,%ymm1,%ymm1 + vaesenc %ymm20,%ymm2,%ymm2 + vaesenc %ymm20,%ymm3,%ymm3 + + vpshufd $0x4e,%ymm10,%ymm10 + vpclmulqdq $0x11,%ymm27,%ymm4,%ymm4 + vpclmulqdq $0x11,%ymm28,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm29,%ymm6,%ymm6 + + vaesenc %ymm21,%ymm0,%ymm0 + vaesenc %ymm21,%ymm1,%ymm1 + vaesenc %ymm21,%ymm2,%ymm2 + vaesenc %ymm21,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm10,%ymm24 + vpclmulqdq $0x11,%ymm30,%ymm7,%ymm7 + vpternlogd $0x96,%ymm6,%ymm5,%ymm4 + vpclmulqdq $0x01,%ymm24,%ymm31,%ymm25 + + vaesenc %ymm22,%ymm0,%ymm0 + vaesenc %ymm22,%ymm1,%ymm1 + vaesenc %ymm22,%ymm2,%ymm2 + vaesenc %ymm22,%ymm3,%ymm3 + + vpxord %ymm7,%ymm4,%ymm10 + vpshufd $0x4e,%ymm24,%ymm24 + vpternlogd $0x96,%ymm25,%ymm24,%ymm10 + + vaesenc %ymm23,%ymm0,%ymm0 + vaesenc %ymm23,%ymm1,%ymm1 + vaesenc %ymm23,%ymm2,%ymm2 + vaesenc %ymm23,%ymm3,%ymm3 + + 
vextracti32x4 $1,%ymm10,%xmm4 + vpxord %xmm4,%xmm10,%xmm10 + + + + + vpxord 0(%rdi),%ymm14,%ymm4 + vpxord 32(%rdi),%ymm14,%ymm5 + vpxord 64(%rdi),%ymm14,%ymm6 + vpxord 96(%rdi),%ymm14,%ymm7 + + + + vaesenclast %ymm4,%ymm0,%ymm4 + vaesenclast %ymm5,%ymm1,%ymm5 + vaesenclast %ymm6,%ymm2,%ymm6 + vaesenclast %ymm7,%ymm3,%ymm7 + + + vmovdqu8 %ymm4,0(%rsi) + vmovdqu8 %ymm5,32(%rsi) + vmovdqu8 %ymm6,64(%rsi) + vmovdqu8 %ymm7,96(%rsi) + + subq $-128,%rdi + subq $-128,%rsi + addq $-128,%rdx + cmpq $128-1,%rdx + ja .Lcrypt_loop_4x__func1 +.Lghash_last_ciphertext_4x__func1: + vpshufb %ymm8,%ymm4,%ymm4 + vpxord %ymm10,%ymm4,%ymm4 + vpshufb %ymm8,%ymm5,%ymm5 + vpshufb %ymm8,%ymm6,%ymm6 + vpshufb %ymm8,%ymm7,%ymm7 + vpclmulqdq $0x00,%ymm27,%ymm4,%ymm10 + vpclmulqdq $0x00,%ymm28,%ymm5,%ymm24 + vpclmulqdq $0x00,%ymm29,%ymm6,%ymm25 + vpxord %ymm24,%ymm10,%ymm10 + vpclmulqdq $0x00,%ymm30,%ymm7,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm10 + vpclmulqdq $0x01,%ymm27,%ymm4,%ymm24 + vpclmulqdq $0x01,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x01,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm30,%ymm7,%ymm25 + vpclmulqdq $0x10,%ymm27,%ymm4,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x10,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x10,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm10,%ymm31,%ymm26 + vpclmulqdq $0x10,%ymm30,%ymm7,%ymm25 + vpxord %ymm25,%ymm24,%ymm24 + vpshufd $0x4e,%ymm10,%ymm10 + vpclmulqdq $0x11,%ymm27,%ymm4,%ymm4 + vpclmulqdq $0x11,%ymm28,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm29,%ymm6,%ymm6 + vpternlogd $0x96,%ymm26,%ymm10,%ymm24 + vpclmulqdq $0x11,%ymm30,%ymm7,%ymm7 + vpternlogd $0x96,%ymm6,%ymm5,%ymm4 + vpclmulqdq $0x01,%ymm24,%ymm31,%ymm25 + vpxord %ymm7,%ymm4,%ymm10 + vpshufd $0x4e,%ymm24,%ymm24 + vpternlogd $0x96,%ymm25,%ymm24,%ymm10 + vextracti32x4 $1,%ymm10,%xmm4 + vpxord %xmm4,%xmm10,%xmm10 + +.Lcrypt_loop_4x_done__func1: + + testq %rdx,%rdx + jz .Ldone__func1 + + + + + + + + + + + + + + + + 
+ + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $32,%rdx + jb .Lpartial_vec__func1 + +.Lcrypt_loop_1x__func1: + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_full_vec__func1: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_full_vec__func1 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1 + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi) + + + vmovdqu8 (%r8),%ymm30 + vpshufb %ymm8,%ymm0,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $32,%r8 + addq $32,%rdi + addq $32,%rsi + subq $32,%rdx + cmpq $32,%rdx + jae .Lcrypt_loop_1x__func1 + + testq %rdx,%rdx + jz .Lreduce__func1 + +.Lpartial_vec__func1: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k2 + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_partialvec__func1: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_partialvec__func1 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1{%k1}{z} + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%ymm30{%k2}{z} + vmovdqu8 %ymm0,%ymm1{%k1}{z} + vpshufb %ymm8,%ymm1,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + 
vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + +.Lreduce__func1: + + vpclmulqdq $0x01,%ymm4,%ymm31,%ymm0 + vpshufd $0x4e,%ymm4,%ymm4 + vpternlogd $0x96,%ymm0,%ymm4,%ymm5 + vpclmulqdq $0x01,%ymm5,%ymm31,%ymm0 + vpshufd $0x4e,%ymm5,%ymm5 + vpternlogd $0x96,%ymm0,%ymm5,%ymm6 + + vextracti32x4 $1,%ymm6,%xmm0 + vpxord %xmm0,%xmm6,%xmm10 + + +.Ldone__func1: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 +.cfi_adjust_cfa_offset -8 +.cfi_restore %r12 + ret + +.cfi_endproc +.size aes_gcm_enc_update_vaes_avx10_256, . - aes_gcm_enc_update_vaes_avx10_256 +.globl aes_gcm_dec_update_vaes_avx10_256 +.hidden aes_gcm_dec_update_vaes_avx10_256 +.type aes_gcm_dec_update_vaes_avx10_256,@function +.align 32 +aes_gcm_dec_update_vaes_avx10_256: +.cfi_startproc + +_CET_ENDBR + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-16 + + movq 16(%rsp),%r12 + + vbroadcasti32x4 .Lbswap_mask(%rip),%ymm8 + vbroadcasti32x4 .Lgfpoly(%rip),%ymm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%ymm12 + vpshufb %ymm8,%ymm12,%ymm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%ymm13 + vbroadcasti32x4 (%r11),%ymm14 + + + vpaddd .Lctr_pattern(%rip),%ymm12,%ymm12 + + + vbroadcasti32x4 .Linc_2blocks(%rip),%ymm11 + + + + cmpq $128-1,%rdx + jbe .Lcrypt_loop_4x_done__func2 + + + vmovdqu8 256-128(%r9),%ymm27 + vmovdqu8 256-96(%r9),%ymm28 + vmovdqu8 256-64(%r9),%ymm29 + vmovdqu8 256-32(%r9),%ymm30 + vbroadcasti32x4 -144(%r11),%ymm15 + vbroadcasti32x4 -128(%r11),%ymm16 + vbroadcasti32x4 -112(%r11),%ymm17 + vbroadcasti32x4 -96(%r11),%ymm18 + vbroadcasti32x4 -80(%r11),%ymm19 + vbroadcasti32x4 -64(%r11),%ymm20 + vbroadcasti32x4 -48(%r11),%ymm21 + vbroadcasti32x4 -32(%r11),%ymm22 + vbroadcasti32x4 -16(%r11),%ymm23 +.Lcrypt_loop_4x__func2: + vmovdqu8 0(%rdi),%ymm4 + vmovdqu8 32(%rdi),%ymm5 + vmovdqu8 64(%rdi),%ymm6 + vmovdqu8 
96(%rdi),%ymm7 + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm1 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm2 + vpaddd %ymm11,%ymm12,%ymm12 + vpshufb %ymm8,%ymm12,%ymm3 + vpaddd %ymm11,%ymm12,%ymm12 + + + vpxord %ymm13,%ymm0,%ymm0 + vpxord %ymm13,%ymm1,%ymm1 + vpxord %ymm13,%ymm2,%ymm2 + vpxord %ymm13,%ymm3,%ymm3 + + cmpl $24,%r10d + jl .Laes128__func2 + je .Laes192__func2 + + vbroadcasti32x4 -208(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -192(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +.Laes192__func2: + vbroadcasti32x4 -176(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + + vbroadcasti32x4 -160(%r11),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + vaesenc %ymm9,%ymm1,%ymm1 + vaesenc %ymm9,%ymm2,%ymm2 + vaesenc %ymm9,%ymm3,%ymm3 + +.Laes128__func2: + vpshufb %ymm8,%ymm4,%ymm4 + vpxord %ymm10,%ymm4,%ymm4 + vpshufb %ymm8,%ymm5,%ymm5 + vpshufb %ymm8,%ymm6,%ymm6 + + vaesenc %ymm15,%ymm0,%ymm0 + vaesenc %ymm15,%ymm1,%ymm1 + vaesenc %ymm15,%ymm2,%ymm2 + vaesenc %ymm15,%ymm3,%ymm3 + + vpshufb %ymm8,%ymm7,%ymm7 + vpclmulqdq $0x00,%ymm27,%ymm4,%ymm10 + vpclmulqdq $0x00,%ymm28,%ymm5,%ymm24 + vpclmulqdq $0x00,%ymm29,%ymm6,%ymm25 + + vaesenc %ymm16,%ymm0,%ymm0 + vaesenc %ymm16,%ymm1,%ymm1 + vaesenc %ymm16,%ymm2,%ymm2 + vaesenc %ymm16,%ymm3,%ymm3 + + vpxord %ymm24,%ymm10,%ymm10 + vpclmulqdq $0x00,%ymm30,%ymm7,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm10 + vpclmulqdq $0x01,%ymm27,%ymm4,%ymm24 + + vaesenc %ymm17,%ymm0,%ymm0 + vaesenc %ymm17,%ymm1,%ymm1 + vaesenc %ymm17,%ymm2,%ymm2 + vaesenc %ymm17,%ymm3,%ymm3 + + vpclmulqdq $0x01,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x01,%ymm29,%ymm6,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm30,%ymm7,%ymm25 + + 
vaesenc %ymm18,%ymm0,%ymm0 + vaesenc %ymm18,%ymm1,%ymm1 + vaesenc %ymm18,%ymm2,%ymm2 + vaesenc %ymm18,%ymm3,%ymm3 + + vpclmulqdq $0x10,%ymm27,%ymm4,%ymm26 + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x10,%ymm28,%ymm5,%ymm25 + vpclmulqdq $0x10,%ymm29,%ymm6,%ymm26 + + vaesenc %ymm19,%ymm0,%ymm0 + vaesenc %ymm19,%ymm1,%ymm1 + vaesenc %ymm19,%ymm2,%ymm2 + vaesenc %ymm19,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm25,%ymm24 + vpclmulqdq $0x01,%ymm10,%ymm31,%ymm26 + vpclmulqdq $0x10,%ymm30,%ymm7,%ymm25 + vpxord %ymm25,%ymm24,%ymm24 + + vaesenc %ymm20,%ymm0,%ymm0 + vaesenc %ymm20,%ymm1,%ymm1 + vaesenc %ymm20,%ymm2,%ymm2 + vaesenc %ymm20,%ymm3,%ymm3 + + vpshufd $0x4e,%ymm10,%ymm10 + vpclmulqdq $0x11,%ymm27,%ymm4,%ymm4 + vpclmulqdq $0x11,%ymm28,%ymm5,%ymm5 + vpclmulqdq $0x11,%ymm29,%ymm6,%ymm6 + + vaesenc %ymm21,%ymm0,%ymm0 + vaesenc %ymm21,%ymm1,%ymm1 + vaesenc %ymm21,%ymm2,%ymm2 + vaesenc %ymm21,%ymm3,%ymm3 + + vpternlogd $0x96,%ymm26,%ymm10,%ymm24 + vpclmulqdq $0x11,%ymm30,%ymm7,%ymm7 + vpternlogd $0x96,%ymm6,%ymm5,%ymm4 + vpclmulqdq $0x01,%ymm24,%ymm31,%ymm25 + + vaesenc %ymm22,%ymm0,%ymm0 + vaesenc %ymm22,%ymm1,%ymm1 + vaesenc %ymm22,%ymm2,%ymm2 + vaesenc %ymm22,%ymm3,%ymm3 + + vpxord %ymm7,%ymm4,%ymm10 + vpshufd $0x4e,%ymm24,%ymm24 + vpternlogd $0x96,%ymm25,%ymm24,%ymm10 + + vaesenc %ymm23,%ymm0,%ymm0 + vaesenc %ymm23,%ymm1,%ymm1 + vaesenc %ymm23,%ymm2,%ymm2 + vaesenc %ymm23,%ymm3,%ymm3 + + vextracti32x4 $1,%ymm10,%xmm4 + vpxord %xmm4,%xmm10,%xmm10 + + + + + vpxord 0(%rdi),%ymm14,%ymm4 + vpxord 32(%rdi),%ymm14,%ymm5 + vpxord 64(%rdi),%ymm14,%ymm6 + vpxord 96(%rdi),%ymm14,%ymm7 + + + + vaesenclast %ymm4,%ymm0,%ymm4 + vaesenclast %ymm5,%ymm1,%ymm5 + vaesenclast %ymm6,%ymm2,%ymm6 + vaesenclast %ymm7,%ymm3,%ymm7 + + + vmovdqu8 %ymm4,0(%rsi) + vmovdqu8 %ymm5,32(%rsi) + vmovdqu8 %ymm6,64(%rsi) + vmovdqu8 %ymm7,96(%rsi) + + subq $-128,%rdi + subq $-128,%rsi + addq $-128,%rdx + cmpq $128-1,%rdx + ja .Lcrypt_loop_4x__func2 +.Lcrypt_loop_4x_done__func2: + + testq 
%rdx,%rdx + jz .Ldone__func2 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $32,%rdx + jb .Lpartial_vec__func2 + +.Lcrypt_loop_1x__func2: + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpaddd %ymm11,%ymm12,%ymm12 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_full_vec__func2: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_full_vec__func2 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1 + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi) + + + vmovdqu8 (%r8),%ymm30 + vpshufb %ymm8,%ymm1,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $32,%r8 + addq $32,%rdi + addq $32,%rsi + subq $32,%rdx + cmpq $32,%rdx + jae .Lcrypt_loop_1x__func2 + + testq %rdx,%rdx + jz .Lreduce__func2 + +.Lpartial_vec__func2: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovd %eax,%k2 + + + + vpshufb %ymm8,%ymm12,%ymm0 + vpxord %ymm13,%ymm0,%ymm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_partialvec__func2: + vbroadcasti32x4 (%rax),%ymm9 + vaesenc %ymm9,%ymm0,%ymm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_partialvec__func2 + vaesenclast %ymm14,%ymm0,%ymm0 + + + vmovdqu8 (%rdi),%ymm1{%k1}{z} + vpxord %ymm1,%ymm0,%ymm0 + vmovdqu8 %ymm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%ymm30{%k2}{z} + + vpshufb %ymm8,%ymm1,%ymm0 + vpxord %ymm10,%ymm0,%ymm0 + vpclmulqdq $0x00,%ymm30,%ymm0,%ymm7 + vpclmulqdq $0x01,%ymm30,%ymm0,%ymm1 + vpclmulqdq $0x10,%ymm30,%ymm0,%ymm2 + 
vpclmulqdq $0x11,%ymm30,%ymm0,%ymm3 + vpxord %ymm7,%ymm4,%ymm4 + vpternlogd $0x96,%ymm2,%ymm1,%ymm5 + vpxord %ymm3,%ymm6,%ymm6 + + +.Lreduce__func2: + + vpclmulqdq $0x01,%ymm4,%ymm31,%ymm0 + vpshufd $0x4e,%ymm4,%ymm4 + vpternlogd $0x96,%ymm0,%ymm4,%ymm5 + vpclmulqdq $0x01,%ymm5,%ymm31,%ymm0 + vpshufd $0x4e,%ymm5,%ymm5 + vpternlogd $0x96,%ymm0,%ymm5,%ymm6 + + vextracti32x4 $1,%ymm6,%xmm0 + vpxord %xmm0,%xmm6,%xmm10 + + +.Ldone__func2: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 +.cfi_adjust_cfa_offset -8 +.cfi_restore %r12 + ret + +.cfi_endproc +.size aes_gcm_dec_update_vaes_avx10_256, . - aes_gcm_dec_update_vaes_avx10_256 +.globl gcm_ghash_vpclmulqdq_avx10_512 +.hidden gcm_ghash_vpclmulqdq_avx10_512 +.type gcm_ghash_vpclmulqdq_avx10_512,@function +.align 32 +gcm_ghash_vpclmulqdq_avx10_512: +.cfi_startproc + +_CET_ENDBR + + + + + + + vmovdqu .Lbswap_mask(%rip),%xmm4 + vmovdqu .Lgfpoly(%rip),%xmm10 + + + vmovdqu (%rdi),%xmm5 + vpshufb %xmm4,%xmm5,%xmm5 + + + cmpq $64,%rcx + jb .Laad_blockbyblock__func2 + + + + vshufi64x2 $0,%zmm4,%zmm4,%zmm4 + vshufi64x2 $0,%zmm10,%zmm10,%zmm10 + + + vmovdqu8 256-64(%rsi),%zmm9 + + cmpq $256-1,%rcx + jbe .Laad_loop_1x__func2 + + + vmovdqu8 256-256(%rsi),%zmm6 + vmovdqu8 256-192(%rsi),%zmm7 + vmovdqu8 256-128(%rsi),%zmm8 + + +.Laad_loop_4x__func2: + vmovdqu8 0(%rdx),%zmm0 + vmovdqu8 64(%rdx),%zmm1 + vmovdqu8 128(%rdx),%zmm2 + vmovdqu8 192(%rdx),%zmm3 + vpshufb %zmm4,%zmm0,%zmm0 + vpxord %zmm5,%zmm0,%zmm0 + vpshufb %zmm4,%zmm1,%zmm1 + vpshufb %zmm4,%zmm2,%zmm2 + vpshufb %zmm4,%zmm3,%zmm3 + vpclmulqdq $0x00,%zmm6,%zmm0,%zmm5 + vpclmulqdq $0x00,%zmm7,%zmm1,%zmm11 + vpclmulqdq $0x00,%zmm8,%zmm2,%zmm12 + vpxord %zmm11,%zmm5,%zmm5 + vpclmulqdq $0x00,%zmm9,%zmm3,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm5 + vpclmulqdq $0x01,%zmm6,%zmm0,%zmm11 + vpclmulqdq $0x01,%zmm7,%zmm1,%zmm12 + vpclmulqdq $0x01,%zmm8,%zmm2,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm11 + vpclmulqdq $0x01,%zmm9,%zmm3,%zmm12 + 
vpclmulqdq $0x10,%zmm6,%zmm0,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm11 + vpclmulqdq $0x10,%zmm7,%zmm1,%zmm12 + vpclmulqdq $0x10,%zmm8,%zmm2,%zmm13 + vpternlogd $0x96,%zmm13,%zmm12,%zmm11 + vpclmulqdq $0x01,%zmm5,%zmm10,%zmm13 + vpclmulqdq $0x10,%zmm9,%zmm3,%zmm12 + vpxord %zmm12,%zmm11,%zmm11 + vpshufd $0x4e,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm6,%zmm0,%zmm0 + vpclmulqdq $0x11,%zmm7,%zmm1,%zmm1 + vpclmulqdq $0x11,%zmm8,%zmm2,%zmm2 + vpternlogd $0x96,%zmm13,%zmm5,%zmm11 + vpclmulqdq $0x11,%zmm9,%zmm3,%zmm3 + vpternlogd $0x96,%zmm2,%zmm1,%zmm0 + vpclmulqdq $0x01,%zmm11,%zmm10,%zmm12 + vpxord %zmm3,%zmm0,%zmm5 + vpshufd $0x4e,%zmm11,%zmm11 + vpternlogd $0x96,%zmm12,%zmm11,%zmm5 + vextracti32x4 $1,%zmm5,%xmm0 + vextracti32x4 $2,%zmm5,%xmm1 + vextracti32x4 $3,%zmm5,%xmm2 + vpxord %xmm0,%xmm5,%xmm5 + vpternlogd $0x96,%xmm1,%xmm2,%xmm5 + + subq $-256,%rdx + addq $-256,%rcx + cmpq $256-1,%rcx + ja .Laad_loop_4x__func2 + + + cmpq $64,%rcx + jb .Laad_large_done__func2 +.Laad_loop_1x__func2: + vmovdqu8 (%rdx),%zmm0 + vpshufb %zmm4,%zmm0,%zmm0 + vpxord %zmm0,%zmm5,%zmm5 + vpclmulqdq $0x00,%zmm9,%zmm5,%zmm0 + vpclmulqdq $0x01,%zmm9,%zmm5,%zmm1 + vpclmulqdq $0x10,%zmm9,%zmm5,%zmm2 + vpxord %zmm2,%zmm1,%zmm1 + vpclmulqdq $0x01,%zmm0,%zmm10,%zmm2 + vpshufd $0x4e,%zmm0,%zmm0 + vpternlogd $0x96,%zmm2,%zmm0,%zmm1 + vpclmulqdq $0x11,%zmm9,%zmm5,%zmm5 + vpclmulqdq $0x01,%zmm1,%zmm10,%zmm0 + vpshufd $0x4e,%zmm1,%zmm1 + vpternlogd $0x96,%zmm0,%zmm1,%zmm5 + + vextracti32x4 $1,%zmm5,%xmm0 + vextracti32x4 $2,%zmm5,%xmm1 + vextracti32x4 $3,%zmm5,%xmm2 + vpxord %xmm0,%xmm5,%xmm5 + vpternlogd $0x96,%xmm1,%xmm2,%xmm5 + + addq $64,%rdx + subq $64,%rcx + cmpq $64,%rcx + jae .Laad_loop_1x__func2 + +.Laad_large_done__func2: + + + vzeroupper + + +.Laad_blockbyblock__func2: + testq %rcx,%rcx + jz .Laad_done__func2 + vmovdqu 256-16(%rsi),%xmm9 +.Laad_loop_blockbyblock__func2: + vmovdqu (%rdx),%xmm0 + vpshufb %xmm4,%xmm0,%xmm0 + vpxor %xmm0,%xmm5,%xmm5 + vpclmulqdq $0x00,%xmm9,%xmm5,%xmm0 + 
vpclmulqdq $0x01,%xmm9,%xmm5,%xmm1 + vpclmulqdq $0x10,%xmm9,%xmm5,%xmm2 + vpxord %xmm2,%xmm1,%xmm1 + vpclmulqdq $0x01,%xmm0,%xmm10,%xmm2 + vpshufd $0x4e,%xmm0,%xmm0 + vpternlogd $0x96,%xmm2,%xmm0,%xmm1 + vpclmulqdq $0x11,%xmm9,%xmm5,%xmm5 + vpclmulqdq $0x01,%xmm1,%xmm10,%xmm0 + vpshufd $0x4e,%xmm1,%xmm1 + vpternlogd $0x96,%xmm0,%xmm1,%xmm5 + + addq $16,%rdx + subq $16,%rcx + jnz .Laad_loop_blockbyblock__func2 + +.Laad_done__func2: + + vpshufb %xmm4,%xmm5,%xmm5 + vmovdqu %xmm5,(%rdi) + ret + +.cfi_endproc +.size gcm_ghash_vpclmulqdq_avx10_512, . - gcm_ghash_vpclmulqdq_avx10_512 +.globl aes_gcm_enc_update_vaes_avx10_512 +.hidden aes_gcm_enc_update_vaes_avx10_512 +.type aes_gcm_enc_update_vaes_avx10_512,@function +.align 32 +aes_gcm_enc_update_vaes_avx10_512: +.cfi_startproc + +_CET_ENDBR + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-16 + + movq 16(%rsp),%r12 +#ifdef BORINGSSL_DISPATCH_TEST +.extern BORINGSSL_function_hit +.hidden BORINGSSL_function_hit + movb $1,BORINGSSL_function_hit+7(%rip) +#endif + + vbroadcasti32x4 .Lbswap_mask(%rip),%zmm8 + vbroadcasti32x4 .Lgfpoly(%rip),%zmm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%zmm12 + vpshufb %zmm8,%zmm12,%zmm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%zmm13 + vbroadcasti32x4 (%r11),%zmm14 + + + vpaddd .Lctr_pattern(%rip),%zmm12,%zmm12 + + + vbroadcasti32x4 .Linc_4blocks(%rip),%zmm11 + + + + cmpq $256-1,%rdx + jbe .Lcrypt_loop_4x_done__func3 + + + vmovdqu8 256-256(%r9),%zmm27 + vmovdqu8 256-192(%r9),%zmm28 + vmovdqu8 256-128(%r9),%zmm29 + vmovdqu8 256-64(%r9),%zmm30 + + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm1 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm2 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm3 + vpaddd %zmm11,%zmm12,%zmm12 + + + vpxord %zmm13,%zmm0,%zmm0 + vpxord %zmm13,%zmm1,%zmm1 + vpxord 
%zmm13,%zmm2,%zmm2 + vpxord %zmm13,%zmm3,%zmm3 + + leaq 16(%rcx),%rax +.Lvaesenc_loop_first_4_vecs__func3: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_first_4_vecs__func3 + + + + vpxord 0(%rdi),%zmm14,%zmm4 + vpxord 64(%rdi),%zmm14,%zmm5 + vpxord 128(%rdi),%zmm14,%zmm6 + vpxord 192(%rdi),%zmm14,%zmm7 + + + + vaesenclast %zmm4,%zmm0,%zmm4 + vaesenclast %zmm5,%zmm1,%zmm5 + vaesenclast %zmm6,%zmm2,%zmm6 + vaesenclast %zmm7,%zmm3,%zmm7 + + + vmovdqu8 %zmm4,0(%rsi) + vmovdqu8 %zmm5,64(%rsi) + vmovdqu8 %zmm6,128(%rsi) + vmovdqu8 %zmm7,192(%rsi) + + subq $-256,%rdi + subq $-256,%rsi + addq $-256,%rdx + cmpq $256-1,%rdx + jbe .Lghash_last_ciphertext_4x__func3 + vbroadcasti32x4 -144(%r11),%zmm15 + vbroadcasti32x4 -128(%r11),%zmm16 + vbroadcasti32x4 -112(%r11),%zmm17 + vbroadcasti32x4 -96(%r11),%zmm18 + vbroadcasti32x4 -80(%r11),%zmm19 + vbroadcasti32x4 -64(%r11),%zmm20 + vbroadcasti32x4 -48(%r11),%zmm21 + vbroadcasti32x4 -32(%r11),%zmm22 + vbroadcasti32x4 -16(%r11),%zmm23 +.Lcrypt_loop_4x__func3: + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm1 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm2 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm3 + vpaddd %zmm11,%zmm12,%zmm12 + + + vpxord %zmm13,%zmm0,%zmm0 + vpxord %zmm13,%zmm1,%zmm1 + vpxord %zmm13,%zmm2,%zmm2 + vpxord %zmm13,%zmm3,%zmm3 + + cmpl $24,%r10d + jl .Laes128__func3 + je .Laes192__func3 + + vbroadcasti32x4 -208(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -192(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +.Laes192__func3: + vbroadcasti32x4 -176(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc 
%zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -160(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +.Laes128__func3: + vpshufb %zmm8,%zmm4,%zmm4 + vpxord %zmm10,%zmm4,%zmm4 + vpshufb %zmm8,%zmm5,%zmm5 + vpshufb %zmm8,%zmm6,%zmm6 + + vaesenc %zmm15,%zmm0,%zmm0 + vaesenc %zmm15,%zmm1,%zmm1 + vaesenc %zmm15,%zmm2,%zmm2 + vaesenc %zmm15,%zmm3,%zmm3 + + vpshufb %zmm8,%zmm7,%zmm7 + vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 + vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 + vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 + + vaesenc %zmm16,%zmm0,%zmm0 + vaesenc %zmm16,%zmm1,%zmm1 + vaesenc %zmm16,%zmm2,%zmm2 + vaesenc %zmm16,%zmm3,%zmm3 + + vpxord %zmm24,%zmm10,%zmm10 + vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm10 + vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 + + vaesenc %zmm17,%zmm0,%zmm0 + vaesenc %zmm17,%zmm1,%zmm1 + vaesenc %zmm17,%zmm2,%zmm2 + vaesenc %zmm17,%zmm3,%zmm3 + + vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 + + vaesenc %zmm18,%zmm0,%zmm0 + vaesenc %zmm18,%zmm1,%zmm1 + vaesenc %zmm18,%zmm2,%zmm2 + vaesenc %zmm18,%zmm3,%zmm3 + + vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 + + vaesenc %zmm19,%zmm0,%zmm0 + vaesenc %zmm19,%zmm1,%zmm1 + vaesenc %zmm19,%zmm2,%zmm2 + vaesenc %zmm19,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 + vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 + vpxord %zmm25,%zmm24,%zmm24 + + vaesenc %zmm20,%zmm0,%zmm0 + vaesenc %zmm20,%zmm1,%zmm1 + vaesenc %zmm20,%zmm2,%zmm2 + vaesenc %zmm20,%zmm3,%zmm3 + + vpshufd $0x4e,%zmm10,%zmm10 + vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 + vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 + + vaesenc %zmm21,%zmm0,%zmm0 + vaesenc 
%zmm21,%zmm1,%zmm1 + vaesenc %zmm21,%zmm2,%zmm2 + vaesenc %zmm21,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm10,%zmm24 + vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 + vpternlogd $0x96,%zmm6,%zmm5,%zmm4 + vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 + + vaesenc %zmm22,%zmm0,%zmm0 + vaesenc %zmm22,%zmm1,%zmm1 + vaesenc %zmm22,%zmm2,%zmm2 + vaesenc %zmm22,%zmm3,%zmm3 + + vpxord %zmm7,%zmm4,%zmm10 + vpshufd $0x4e,%zmm24,%zmm24 + vpternlogd $0x96,%zmm25,%zmm24,%zmm10 + + vaesenc %zmm23,%zmm0,%zmm0 + vaesenc %zmm23,%zmm1,%zmm1 + vaesenc %zmm23,%zmm2,%zmm2 + vaesenc %zmm23,%zmm3,%zmm3 + + vextracti32x4 $1,%zmm10,%xmm4 + vextracti32x4 $2,%zmm10,%xmm5 + vextracti32x4 $3,%zmm10,%xmm6 + vpxord %xmm4,%xmm10,%xmm10 + vpternlogd $0x96,%xmm5,%xmm6,%xmm10 + + + + + vpxord 0(%rdi),%zmm14,%zmm4 + vpxord 64(%rdi),%zmm14,%zmm5 + vpxord 128(%rdi),%zmm14,%zmm6 + vpxord 192(%rdi),%zmm14,%zmm7 + + + + vaesenclast %zmm4,%zmm0,%zmm4 + vaesenclast %zmm5,%zmm1,%zmm5 + vaesenclast %zmm6,%zmm2,%zmm6 + vaesenclast %zmm7,%zmm3,%zmm7 + + + vmovdqu8 %zmm4,0(%rsi) + vmovdqu8 %zmm5,64(%rsi) + vmovdqu8 %zmm6,128(%rsi) + vmovdqu8 %zmm7,192(%rsi) + + subq $-256,%rdi + subq $-256,%rsi + addq $-256,%rdx + cmpq $256-1,%rdx + ja .Lcrypt_loop_4x__func3 +.Lghash_last_ciphertext_4x__func3: + vpshufb %zmm8,%zmm4,%zmm4 + vpxord %zmm10,%zmm4,%zmm4 + vpshufb %zmm8,%zmm5,%zmm5 + vpshufb %zmm8,%zmm6,%zmm6 + vpshufb %zmm8,%zmm7,%zmm7 + vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 + vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 + vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 + vpxord %zmm24,%zmm10,%zmm10 + vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm10 + vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 + vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 + vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 + vpternlogd 
$0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 + vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 + vpxord %zmm25,%zmm24,%zmm24 + vpshufd $0x4e,%zmm10,%zmm10 + vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 + vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 + vpternlogd $0x96,%zmm26,%zmm10,%zmm24 + vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 + vpternlogd $0x96,%zmm6,%zmm5,%zmm4 + vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 + vpxord %zmm7,%zmm4,%zmm10 + vpshufd $0x4e,%zmm24,%zmm24 + vpternlogd $0x96,%zmm25,%zmm24,%zmm10 + vextracti32x4 $1,%zmm10,%xmm4 + vextracti32x4 $2,%zmm10,%xmm5 + vextracti32x4 $3,%zmm10,%xmm6 + vpxord %xmm4,%xmm10,%xmm10 + vpternlogd $0x96,%xmm5,%xmm6,%xmm10 + +.Lcrypt_loop_4x_done__func3: + + testq %rdx,%rdx + jz .Ldone__func3 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $64,%rdx + jb .Lpartial_vec__func3 + +.Lcrypt_loop_1x__func3: + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_full_vec__func3: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_full_vec__func3 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1 + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi) + + + vmovdqu8 (%r8),%zmm30 + vpshufb %zmm8,%zmm0,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $64,%r8 + addq $64,%rdi + addq $64,%rsi + subq $64,%rdx + cmpq $64,%rdx + jae .Lcrypt_loop_1x__func3 + + testq %rdx,%rdx + jz .Lreduce__func3 + +.Lpartial_vec__func3: + + + + + movq $-1,%rax + bzhiq 
%rdx,%rax,%rax + kmovq %rax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovq %rax,%k2 + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_partialvec__func3: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_partialvec__func3 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1{%k1}{z} + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%zmm30{%k2}{z} + vmovdqu8 %zmm0,%zmm1{%k1}{z} + vpshufb %zmm8,%zmm1,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + +.Lreduce__func3: + + vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 + vpshufd $0x4e,%zmm4,%zmm4 + vpternlogd $0x96,%zmm0,%zmm4,%zmm5 + vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 + vpshufd $0x4e,%zmm5,%zmm5 + vpternlogd $0x96,%zmm0,%zmm5,%zmm6 + + vextracti32x4 $1,%zmm6,%xmm0 + vextracti32x4 $2,%zmm6,%xmm1 + vextracti32x4 $3,%zmm6,%xmm2 + vpxord %xmm0,%xmm6,%xmm10 + vpternlogd $0x96,%xmm1,%xmm2,%xmm10 + + +.Ldone__func3: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 +.cfi_adjust_cfa_offset -8 +.cfi_restore %r12 + ret + +.cfi_endproc +.size aes_gcm_enc_update_vaes_avx10_512, . 
- aes_gcm_enc_update_vaes_avx10_512 +.globl aes_gcm_dec_update_vaes_avx10_512 +.hidden aes_gcm_dec_update_vaes_avx10_512 +.type aes_gcm_dec_update_vaes_avx10_512,@function +.align 32 +aes_gcm_dec_update_vaes_avx10_512: +.cfi_startproc + +_CET_ENDBR + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-16 + + movq 16(%rsp),%r12 + + vbroadcasti32x4 .Lbswap_mask(%rip),%zmm8 + vbroadcasti32x4 .Lgfpoly(%rip),%zmm31 + + + + vmovdqu (%r12),%xmm10 + vpshufb %xmm8,%xmm10,%xmm10 + vbroadcasti32x4 (%r8),%zmm12 + vpshufb %zmm8,%zmm12,%zmm12 + + + + movl 240(%rcx),%r10d + leal -20(,%r10,4),%r10d + + + + + leaq 96(%rcx,%r10,4),%r11 + vbroadcasti32x4 (%rcx),%zmm13 + vbroadcasti32x4 (%r11),%zmm14 + + + vpaddd .Lctr_pattern(%rip),%zmm12,%zmm12 + + + vbroadcasti32x4 .Linc_4blocks(%rip),%zmm11 + + + + cmpq $256-1,%rdx + jbe .Lcrypt_loop_4x_done__func4 + + + vmovdqu8 256-256(%r9),%zmm27 + vmovdqu8 256-192(%r9),%zmm28 + vmovdqu8 256-128(%r9),%zmm29 + vmovdqu8 256-64(%r9),%zmm30 + vbroadcasti32x4 -144(%r11),%zmm15 + vbroadcasti32x4 -128(%r11),%zmm16 + vbroadcasti32x4 -112(%r11),%zmm17 + vbroadcasti32x4 -96(%r11),%zmm18 + vbroadcasti32x4 -80(%r11),%zmm19 + vbroadcasti32x4 -64(%r11),%zmm20 + vbroadcasti32x4 -48(%r11),%zmm21 + vbroadcasti32x4 -32(%r11),%zmm22 + vbroadcasti32x4 -16(%r11),%zmm23 +.Lcrypt_loop_4x__func4: + vmovdqu8 0(%rdi),%zmm4 + vmovdqu8 64(%rdi),%zmm5 + vmovdqu8 128(%rdi),%zmm6 + vmovdqu8 192(%rdi),%zmm7 + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm1 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm2 + vpaddd %zmm11,%zmm12,%zmm12 + vpshufb %zmm8,%zmm12,%zmm3 + vpaddd %zmm11,%zmm12,%zmm12 + + + vpxord %zmm13,%zmm0,%zmm0 + vpxord %zmm13,%zmm1,%zmm1 + vpxord %zmm13,%zmm2,%zmm2 + vpxord %zmm13,%zmm3,%zmm3 + + cmpl $24,%r10d + jl .Laes128__func4 + je .Laes192__func4 + + vbroadcasti32x4 -208(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + 
vbroadcasti32x4 -192(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +.Laes192__func4: + vbroadcasti32x4 -176(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + + vbroadcasti32x4 -160(%r11),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + vaesenc %zmm9,%zmm1,%zmm1 + vaesenc %zmm9,%zmm2,%zmm2 + vaesenc %zmm9,%zmm3,%zmm3 + +.Laes128__func4: + vpshufb %zmm8,%zmm4,%zmm4 + vpxord %zmm10,%zmm4,%zmm4 + vpshufb %zmm8,%zmm5,%zmm5 + vpshufb %zmm8,%zmm6,%zmm6 + + vaesenc %zmm15,%zmm0,%zmm0 + vaesenc %zmm15,%zmm1,%zmm1 + vaesenc %zmm15,%zmm2,%zmm2 + vaesenc %zmm15,%zmm3,%zmm3 + + vpshufb %zmm8,%zmm7,%zmm7 + vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 + vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 + vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 + + vaesenc %zmm16,%zmm0,%zmm0 + vaesenc %zmm16,%zmm1,%zmm1 + vaesenc %zmm16,%zmm2,%zmm2 + vaesenc %zmm16,%zmm3,%zmm3 + + vpxord %zmm24,%zmm10,%zmm10 + vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm10 + vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 + + vaesenc %zmm17,%zmm0,%zmm0 + vaesenc %zmm17,%zmm1,%zmm1 + vaesenc %zmm17,%zmm2,%zmm2 + vaesenc %zmm17,%zmm3,%zmm3 + + vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 + + vaesenc %zmm18,%zmm0,%zmm0 + vaesenc %zmm18,%zmm1,%zmm1 + vaesenc %zmm18,%zmm2,%zmm2 + vaesenc %zmm18,%zmm3,%zmm3 + + vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 + vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 + + vaesenc %zmm19,%zmm0,%zmm0 + vaesenc %zmm19,%zmm1,%zmm1 + vaesenc %zmm19,%zmm2,%zmm2 + vaesenc %zmm19,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm25,%zmm24 + vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 + vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 + vpxord %zmm25,%zmm24,%zmm24 + + vaesenc %zmm20,%zmm0,%zmm0 + vaesenc 
%zmm20,%zmm1,%zmm1 + vaesenc %zmm20,%zmm2,%zmm2 + vaesenc %zmm20,%zmm3,%zmm3 + + vpshufd $0x4e,%zmm10,%zmm10 + vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 + vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 + vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 + + vaesenc %zmm21,%zmm0,%zmm0 + vaesenc %zmm21,%zmm1,%zmm1 + vaesenc %zmm21,%zmm2,%zmm2 + vaesenc %zmm21,%zmm3,%zmm3 + + vpternlogd $0x96,%zmm26,%zmm10,%zmm24 + vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 + vpternlogd $0x96,%zmm6,%zmm5,%zmm4 + vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 + + vaesenc %zmm22,%zmm0,%zmm0 + vaesenc %zmm22,%zmm1,%zmm1 + vaesenc %zmm22,%zmm2,%zmm2 + vaesenc %zmm22,%zmm3,%zmm3 + + vpxord %zmm7,%zmm4,%zmm10 + vpshufd $0x4e,%zmm24,%zmm24 + vpternlogd $0x96,%zmm25,%zmm24,%zmm10 + + vaesenc %zmm23,%zmm0,%zmm0 + vaesenc %zmm23,%zmm1,%zmm1 + vaesenc %zmm23,%zmm2,%zmm2 + vaesenc %zmm23,%zmm3,%zmm3 + + vextracti32x4 $1,%zmm10,%xmm4 + vextracti32x4 $2,%zmm10,%xmm5 + vextracti32x4 $3,%zmm10,%xmm6 + vpxord %xmm4,%xmm10,%xmm10 + vpternlogd $0x96,%xmm5,%xmm6,%xmm10 + + + + + vpxord 0(%rdi),%zmm14,%zmm4 + vpxord 64(%rdi),%zmm14,%zmm5 + vpxord 128(%rdi),%zmm14,%zmm6 + vpxord 192(%rdi),%zmm14,%zmm7 + + + + vaesenclast %zmm4,%zmm0,%zmm4 + vaesenclast %zmm5,%zmm1,%zmm5 + vaesenclast %zmm6,%zmm2,%zmm6 + vaesenclast %zmm7,%zmm3,%zmm7 + + + vmovdqu8 %zmm4,0(%rsi) + vmovdqu8 %zmm5,64(%rsi) + vmovdqu8 %zmm6,128(%rsi) + vmovdqu8 %zmm7,192(%rsi) + + subq $-256,%rdi + subq $-256,%rsi + addq $-256,%rdx + cmpq $256-1,%rdx + ja .Lcrypt_loop_4x__func4 +.Lcrypt_loop_4x_done__func4: + + testq %rdx,%rdx + jz .Ldone__func4 + + + + + + + + + + + + + + + + + + + + + movq %rdx,%rax + negq %rax + andq $-16,%rax + leaq 256(%r9,%rax,1),%r8 + vpxor %xmm4,%xmm4,%xmm4 + vpxor %xmm5,%xmm5,%xmm5 + vpxor %xmm6,%xmm6,%xmm6 + + cmpq $64,%rdx + jb .Lpartial_vec__func4 + +.Lcrypt_loop_1x__func4: + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpaddd %zmm11,%zmm12,%zmm12 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_full_vec__func4: + vbroadcasti32x4 (%rax),%zmm9 + 
vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_full_vec__func4 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1 + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi) + + + vmovdqu8 (%r8),%zmm30 + vpshufb %zmm8,%zmm1,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + vpxor %xmm10,%xmm10,%xmm10 + + addq $64,%r8 + addq $64,%rdi + addq $64,%rsi + subq $64,%rdx + cmpq $64,%rdx + jae .Lcrypt_loop_1x__func4 + + testq %rdx,%rdx + jz .Lreduce__func4 + +.Lpartial_vec__func4: + + + + + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovq %rax,%k1 + addq $15,%rdx + andq $-16,%rdx + movq $-1,%rax + bzhiq %rdx,%rax,%rax + kmovq %rax,%k2 + + + + vpshufb %zmm8,%zmm12,%zmm0 + vpxord %zmm13,%zmm0,%zmm0 + leaq 16(%rcx),%rax +.Lvaesenc_loop_tail_partialvec__func4: + vbroadcasti32x4 (%rax),%zmm9 + vaesenc %zmm9,%zmm0,%zmm0 + addq $16,%rax + cmpq %rax,%r11 + jne .Lvaesenc_loop_tail_partialvec__func4 + vaesenclast %zmm14,%zmm0,%zmm0 + + + vmovdqu8 (%rdi),%zmm1{%k1}{z} + vpxord %zmm1,%zmm0,%zmm0 + vmovdqu8 %zmm0,(%rsi){%k1} + + + + + + + + + + + + + + vmovdqu8 (%r8),%zmm30{%k2}{z} + + vpshufb %zmm8,%zmm1,%zmm0 + vpxord %zmm10,%zmm0,%zmm0 + vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 + vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 + vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 + vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 + vpxord %zmm7,%zmm4,%zmm4 + vpternlogd $0x96,%zmm2,%zmm1,%zmm5 + vpxord %zmm3,%zmm6,%zmm6 + + +.Lreduce__func4: + + vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 + vpshufd $0x4e,%zmm4,%zmm4 + vpternlogd $0x96,%zmm0,%zmm4,%zmm5 + vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 + vpshufd $0x4e,%zmm5,%zmm5 + vpternlogd $0x96,%zmm0,%zmm5,%zmm6 + + vextracti32x4 $1,%zmm6,%xmm0 + vextracti32x4 $2,%zmm6,%xmm1 + vextracti32x4 $3,%zmm6,%xmm2 + vpxord %xmm0,%xmm6,%xmm10 + 
vpternlogd $0x96,%xmm1,%xmm2,%xmm10 + + +.Ldone__func4: + + vpshufb %xmm8,%xmm10,%xmm10 + vmovdqu %xmm10,(%r12) + + vzeroupper + popq %r12 +.cfi_adjust_cfa_offset -8 +.cfi_restore %r12 + ret + +.cfi_endproc +.size aes_gcm_dec_update_vaes_avx10_512, . - aes_gcm_dec_update_vaes_avx10_512 +#endif +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif + diff --git a/Sources/CCryptoBoringSSL/gen/crypto/err_data.cc b/Sources/CCryptoBoringSSL/gen/crypto/err_data.cc index 6a83865e..ccc09052 100644 --- a/Sources/CCryptoBoringSSL/gen/crypto/err_data.cc +++ b/Sources/CCryptoBoringSSL/gen/crypto/err_data.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/hash.txt b/Sources/CCryptoBoringSSL/hash.txt index 9844d768..f8a6ea03 100644 --- a/Sources/CCryptoBoringSSL/hash.txt +++ b/Sources/CCryptoBoringSSL/hash.txt @@ -1 +1 @@ -This directory is derived from BoringSSL cloned from https://boringssl.googlesource.com/boringssl at revision fcef13a49852397a0d39c00be8d7bc2ba1ab6fb9 +This directory is derived from BoringSSL cloned from https://boringssl.googlesource.com/boringssl at revision aefa5d24da34ef77ac797bdbe684734e5bd870f4 diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_aead.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_aead.h index 5ae9a4dd..d973b7cc 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_aead.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_aead.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asm_base.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asm_base.h index c84d6cb0..ab6c13e8 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asm_base.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asm_base.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1_mac.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1_mac.h index 419a85ea..7d4a9399 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1_mac.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1_mac.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. +/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1t.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1t.h index 3e55b93b..74d15700 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1t.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_asn1t.h @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 2000. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 2000. */ /* ==================================================================== * Copyright (c) 2000-2005 The OpenSSL Project. All rights reserved. 
diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bcm_public.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bcm_public.h index 8abc6862..43577bbe 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bcm_public.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bcm_public.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_blake2.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_blake2.h index 5c06d081..d577ab4e 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_blake2.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_blake2.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Google Inc. +/* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols.h index 81701320..0a21a26f 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018, Google Inc. 
+// Copyright 2018 The BoringSSL Authors // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -208,6 +208,30 @@ #define BASIC_CONSTRAINTS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_it) #define BASIC_CONSTRAINTS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_new) #define BCM_fips_186_2_prf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_fips_186_2_prf) +#define BCM_mldsa65_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_generate_key) +#define BCM_mldsa65_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_generate_key_external_entropy) +#define BCM_mldsa65_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_marshal_private_key) +#define BCM_mldsa65_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_marshal_public_key) +#define BCM_mldsa65_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_parse_private_key) +#define BCM_mldsa65_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_parse_public_key) +#define BCM_mldsa65_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_private_key_from_seed) +#define BCM_mldsa65_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_public_from_private) +#define BCM_mldsa65_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_sign) +#define BCM_mldsa65_sign_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_sign_internal) +#define BCM_mldsa65_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_verify) +#define BCM_mldsa65_verify_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_verify_internal) +#define BCM_mldsa87_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_generate_key) +#define BCM_mldsa87_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_generate_key_external_entropy) 
+#define BCM_mldsa87_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_marshal_private_key) +#define BCM_mldsa87_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_marshal_public_key) +#define BCM_mldsa87_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_parse_private_key) +#define BCM_mldsa87_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_parse_public_key) +#define BCM_mldsa87_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_private_key_from_seed) +#define BCM_mldsa87_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_public_from_private) +#define BCM_mldsa87_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_sign) +#define BCM_mldsa87_sign_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_sign_internal) +#define BCM_mldsa87_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_verify) +#define BCM_mldsa87_verify_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_verify_internal) #define BCM_rand_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_rand_bytes) #define BCM_rand_bytes_hwrng BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_rand_bytes_hwrng) #define BCM_rand_bytes_with_additional_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_rand_bytes_with_additional_data) @@ -242,7 +266,6 @@ #define BIO_ctrl_get_read_request BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ctrl_get_read_request) #define BIO_ctrl_get_write_guarantee BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ctrl_get_write_guarantee) #define BIO_ctrl_pending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ctrl_pending) -#define BIO_do_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_do_connect) #define BIO_eof BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_eof) #define BIO_find_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_find_type) #define BIO_flush BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_flush) @@ -277,12 +300,10 @@ #define BIO_method_type 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_method_type) #define BIO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new) #define BIO_new_bio_pair BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_bio_pair) -#define BIO_new_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_connect) #define BIO_new_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_fd) #define BIO_new_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_file) #define BIO_new_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_fp) #define BIO_new_mem_buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_mem_buf) -#define BIO_new_socket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_socket) #define BIO_next BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_next) #define BIO_number_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_number_read) #define BIO_number_written BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_number_written) @@ -297,16 +318,11 @@ #define BIO_read_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_read_filename) #define BIO_reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_reset) #define BIO_rw_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_rw_filename) -#define BIO_s_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_connect) #define BIO_s_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_fd) #define BIO_s_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_file) #define BIO_s_mem BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_mem) -#define BIO_s_socket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_socket) #define BIO_seek BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_seek) #define BIO_set_close BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_close) -#define BIO_set_conn_hostname BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_conn_hostname) -#define BIO_set_conn_int_port BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_conn_int_port) -#define BIO_set_conn_port BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_conn_port) #define BIO_set_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_data) #define 
BIO_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_ex_data) #define BIO_set_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_fd) @@ -315,7 +331,6 @@ #define BIO_set_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_init) #define BIO_set_mem_buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_mem_buf) #define BIO_set_mem_eof_return BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_mem_eof_return) -#define BIO_set_nbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_nbio) #define BIO_set_retry_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_read) #define BIO_set_retry_reason BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_reason) #define BIO_set_retry_special BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_special) @@ -639,6 +654,7 @@ #define CRYPTO_cfb128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cfb128_encrypt) #define CRYPTO_chacha_20 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_chacha_20) #define CRYPTO_cleanup_all_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cleanup_all_ex_data) +#define CRYPTO_cpu_avoid_zmm_registers BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cpu_avoid_zmm_registers) #define CRYPTO_cpu_perf_is_like_silvermont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cpu_perf_is_like_silvermont) #define CRYPTO_ctr128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt) #define CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt_ctr32) @@ -677,6 +693,8 @@ #define CRYPTO_is_ARMv8_SHA256_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA256_capable) #define CRYPTO_is_ARMv8_SHA512_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA512_capable) #define CRYPTO_is_AVX2_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX2_capable) +#define CRYPTO_is_AVX512BW_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX512BW_capable) +#define CRYPTO_is_AVX512VL_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
CRYPTO_is_AVX512VL_capable) #define CRYPTO_is_AVX_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX_capable) #define CRYPTO_is_BMI1_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_BMI1_capable) #define CRYPTO_is_BMI2_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_BMI2_capable) @@ -687,6 +705,8 @@ #define CRYPTO_is_RDRAND_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_RDRAND_capable) #define CRYPTO_is_SSE4_1_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_SSE4_1_capable) #define CRYPTO_is_SSSE3_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_SSSE3_capable) +#define CRYPTO_is_VAES_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_VAES_capable) +#define CRYPTO_is_VPCLMULQDQ_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_VPCLMULQDQ_capable) #define CRYPTO_is_confidential_build BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_confidential_build) #define CRYPTO_is_intel_cpu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_intel_cpu) #define CRYPTO_is_x86_SHA_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_x86_SHA_capable) @@ -781,16 +801,6 @@ #define DH_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_size) #define DH_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_up_ref) #define DHparams_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DHparams_dup) -#define DILITHIUM_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_generate_key) -#define DILITHIUM_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_generate_key_external_entropy) -#define DILITHIUM_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_marshal_private_key) -#define DILITHIUM_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_marshal_public_key) -#define DILITHIUM_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_parse_private_key) -#define DILITHIUM_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_parse_public_key) -#define 
DILITHIUM_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_public_from_private) -#define DILITHIUM_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_sign) -#define DILITHIUM_sign_deterministic BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_sign_deterministic) -#define DILITHIUM_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DILITHIUM_verify) #define DIRECTORYSTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIRECTORYSTRING_free) #define DIRECTORYSTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIRECTORYSTRING_it) #define DIRECTORYSTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIRECTORYSTRING_new) @@ -1137,6 +1147,7 @@ #define EVP_MD_CTX_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_create) #define EVP_MD_CTX_destroy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_destroy) #define EVP_MD_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_free) +#define EVP_MD_CTX_get0_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_get0_md) #define EVP_MD_CTX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_init) #define EVP_MD_CTX_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_md) #define EVP_MD_CTX_move BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_move) @@ -1418,17 +1429,13 @@ #define METHOD_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, METHOD_ref) #define METHOD_unref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, METHOD_unref) #define MLDSA65_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_generate_key) -#define MLDSA65_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_generate_key_external_entropy) -#define MLDSA65_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_marshal_private_key) #define MLDSA65_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_marshal_public_key) #define MLDSA65_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_parse_private_key) #define MLDSA65_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
MLDSA65_parse_public_key) #define MLDSA65_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_private_key_from_seed) #define MLDSA65_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_public_from_private) #define MLDSA65_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_sign) -#define MLDSA65_sign_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_sign_internal) #define MLDSA65_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_verify) -#define MLDSA65_verify_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_verify_internal) #define MLKEM1024_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_decap) #define MLKEM1024_encap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_encap) #define MLKEM1024_encap_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_encap_external_entropy) @@ -1476,8 +1483,6 @@ #define NOTICEREF_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NOTICEREF_free) #define NOTICEREF_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NOTICEREF_it) #define NOTICEREF_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NOTICEREF_new) -#define OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER) -#define OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER) #define OBJ_cbs2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_cbs2nid) #define OBJ_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_cleanup) #define OBJ_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_cmp) @@ -1857,10 +1862,6 @@ #define SPAKE2_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_CTX_new) #define SPAKE2_generate_msg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_generate_msg) #define SPAKE2_process_msg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_process_msg) -#define 
SPX_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPX_generate_key) -#define SPX_generate_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPX_generate_key_from_seed) -#define SPX_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPX_sign) -#define SPX_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPX_verify) #define SSLeay BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLeay) #define SSLeay_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLeay_version) #define TRUST_TOKEN_CLIENT_add_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_add_key) @@ -2363,7 +2364,11 @@ #define aes256gcmsiv_kdf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_kdf) #define aes_ctr_set_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_ctr_set_key) #define aes_gcm_dec_kernel BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_dec_kernel) +#define aes_gcm_dec_update_vaes_avx10_256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx10_256) +#define aes_gcm_dec_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx10_512) #define aes_gcm_enc_kernel BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_enc_kernel) +#define aes_gcm_enc_update_vaes_avx10_256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx10_256) +#define aes_gcm_enc_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx10_512) #define aes_hw_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_cbc_encrypt) #define aes_hw_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_ctr32_encrypt_blocks) #define aes_hw_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_decrypt) @@ -2406,13 +2411,10 @@ #define asn1_type_set0_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_type_set0_string) #define asn1_type_value_as_pointer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_type_value_as_pointer) #define asn1_utctime_to_tm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_utctime_to_tm) +#define bcm_as_approved_status 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bcm_as_approved_status) +#define bcm_success BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bcm_success) #define beeu_mod_inverse_vartime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, beeu_mod_inverse_vartime) -#define bio_clear_socket_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_clear_socket_error) #define bio_errno_should_retry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_errno_should_retry) -#define bio_ip_and_port_to_socket_and_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_ip_and_port_to_socket_and_addr) -#define bio_sock_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_sock_error) -#define bio_socket_nbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_socket_nbio) -#define bio_socket_should_retry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_socket_should_retry) #define bn_abs_sub_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_abs_sub_consttime) #define bn_add_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_add_words) #define bn_assert_fits_in_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_assert_fits_in_bytes) @@ -2775,18 +2777,22 @@ #define gcm_ghash_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_nohw) #define gcm_ghash_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_ssse3) #define gcm_ghash_v8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_v8) +#define gcm_ghash_vpclmulqdq_avx10_256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx10_256) +#define gcm_ghash_vpclmulqdq_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx10_512) #define gcm_gmult_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_avx) #define gcm_gmult_clmul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_clmul) #define gcm_gmult_neon BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_neon) #define gcm_gmult_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_nohw) #define gcm_gmult_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_ssse3) #define gcm_gmult_v8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
gcm_gmult_v8) +#define gcm_gmult_vpclmulqdq_avx10 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_vpclmulqdq_avx10) #define gcm_init_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_avx) #define gcm_init_clmul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_clmul) #define gcm_init_neon BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_neon) #define gcm_init_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_nohw) #define gcm_init_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_ssse3) #define gcm_init_v8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_v8) +#define gcm_init_vpclmulqdq_avx10 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_vpclmulqdq_avx10) #define gcm_neon_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_neon_capable) #define gcm_pmull_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_pmull_capable) #define have_fast_rdrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, have_fast_rdrand) @@ -3268,39 +3274,6 @@ #define slhdsa_wots_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_wots_sign) #define slhdsa_xmss_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_xmss_pk_from_sig) #define slhdsa_xmss_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_xmss_sign) -#define spx_base_b BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_base_b) -#define spx_copy_keypair_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_copy_keypair_addr) -#define spx_fors_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_fors_pk_from_sig) -#define spx_fors_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_fors_sign) -#define spx_fors_sk_gen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_fors_sk_gen) -#define spx_fors_treehash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_fors_treehash) -#define spx_get_tree_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_get_tree_index) -#define spx_ht_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_ht_sign) -#define spx_ht_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_ht_verify) -#define spx_set_chain_addr 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_chain_addr) -#define spx_set_hash_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_hash_addr) -#define spx_set_keypair_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_keypair_addr) -#define spx_set_layer_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_layer_addr) -#define spx_set_tree_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_tree_addr) -#define spx_set_tree_height BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_tree_height) -#define spx_set_tree_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_tree_index) -#define spx_set_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_set_type) -#define spx_thash_f BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_f) -#define spx_thash_h BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_h) -#define spx_thash_hmsg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_hmsg) -#define spx_thash_prf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_prf) -#define spx_thash_prfmsg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_prfmsg) -#define spx_thash_tk BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_tk) -#define spx_thash_tl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_thash_tl) -#define spx_to_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_to_uint64) -#define spx_treehash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_treehash) -#define spx_uint64_to_len_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_uint64_to_len_bytes) -#define spx_wots_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_wots_pk_from_sig) -#define spx_wots_pk_gen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_wots_pk_gen) -#define spx_wots_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_wots_sign) -#define spx_xmss_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_xmss_pk_from_sig) -#define spx_xmss_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, spx_xmss_sign) -#define swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE) #define v2i_GENERAL_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v2i_GENERAL_NAME) #define v2i_GENERAL_NAMES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v2i_GENERAL_NAMES) #define v2i_GENERAL_NAME_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v2i_GENERAL_NAME_ex) diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols_asm.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols_asm.h index f42d3d3f..70767062 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols_asm.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_boringssl_prefix_symbols_asm.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018, Google Inc. +// Copyright 2018 The BoringSSL Authors // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -213,6 +213,30 @@ #define _BASIC_CONSTRAINTS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_it) #define _BASIC_CONSTRAINTS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_new) #define _BCM_fips_186_2_prf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_fips_186_2_prf) +#define _BCM_mldsa65_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_generate_key) +#define _BCM_mldsa65_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_generate_key_external_entropy) +#define _BCM_mldsa65_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_marshal_private_key) +#define _BCM_mldsa65_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_marshal_public_key) +#define _BCM_mldsa65_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_parse_private_key) +#define _BCM_mldsa65_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_parse_public_key) +#define 
_BCM_mldsa65_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_private_key_from_seed) +#define _BCM_mldsa65_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_public_from_private) +#define _BCM_mldsa65_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_sign) +#define _BCM_mldsa65_sign_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_sign_internal) +#define _BCM_mldsa65_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_verify) +#define _BCM_mldsa65_verify_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_verify_internal) +#define _BCM_mldsa87_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_generate_key) +#define _BCM_mldsa87_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_generate_key_external_entropy) +#define _BCM_mldsa87_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_marshal_private_key) +#define _BCM_mldsa87_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_marshal_public_key) +#define _BCM_mldsa87_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_parse_private_key) +#define _BCM_mldsa87_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_parse_public_key) +#define _BCM_mldsa87_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_private_key_from_seed) +#define _BCM_mldsa87_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_public_from_private) +#define _BCM_mldsa87_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_sign) +#define _BCM_mldsa87_sign_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_sign_internal) +#define _BCM_mldsa87_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_verify) +#define _BCM_mldsa87_verify_internal 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_verify_internal) #define _BCM_rand_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_rand_bytes) #define _BCM_rand_bytes_hwrng BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_rand_bytes_hwrng) #define _BCM_rand_bytes_with_additional_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_rand_bytes_with_additional_data) @@ -247,7 +271,6 @@ #define _BIO_ctrl_get_read_request BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl_get_read_request) #define _BIO_ctrl_get_write_guarantee BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl_get_write_guarantee) #define _BIO_ctrl_pending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl_pending) -#define _BIO_do_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_do_connect) #define _BIO_eof BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_eof) #define _BIO_find_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_find_type) #define _BIO_flush BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_flush) @@ -282,12 +305,10 @@ #define _BIO_method_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_method_type) #define _BIO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new) #define _BIO_new_bio_pair BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_bio_pair) -#define _BIO_new_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_connect) #define _BIO_new_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_fd) #define _BIO_new_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_file) #define _BIO_new_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_fp) #define _BIO_new_mem_buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_mem_buf) -#define _BIO_new_socket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_socket) #define _BIO_next BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_next) #define _BIO_number_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_number_read) 
#define _BIO_number_written BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_number_written) @@ -302,16 +323,11 @@ #define _BIO_read_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_read_filename) #define _BIO_reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_reset) #define _BIO_rw_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_rw_filename) -#define _BIO_s_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_connect) #define _BIO_s_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_fd) #define _BIO_s_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_file) #define _BIO_s_mem BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_mem) -#define _BIO_s_socket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_socket) #define _BIO_seek BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_seek) #define _BIO_set_close BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_close) -#define _BIO_set_conn_hostname BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_conn_hostname) -#define _BIO_set_conn_int_port BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_conn_int_port) -#define _BIO_set_conn_port BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_conn_port) #define _BIO_set_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_data) #define _BIO_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_ex_data) #define _BIO_set_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_fd) @@ -320,7 +336,6 @@ #define _BIO_set_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_init) #define _BIO_set_mem_buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_mem_buf) #define _BIO_set_mem_eof_return BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_mem_eof_return) -#define _BIO_set_nbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_nbio) #define _BIO_set_retry_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_read) #define _BIO_set_retry_reason 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_reason) #define _BIO_set_retry_special BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_special) @@ -644,6 +659,7 @@ #define _CRYPTO_cfb128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cfb128_encrypt) #define _CRYPTO_chacha_20 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_chacha_20) #define _CRYPTO_cleanup_all_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cleanup_all_ex_data) +#define _CRYPTO_cpu_avoid_zmm_registers BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cpu_avoid_zmm_registers) #define _CRYPTO_cpu_perf_is_like_silvermont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cpu_perf_is_like_silvermont) #define _CRYPTO_ctr128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt) #define _CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt_ctr32) @@ -682,6 +698,8 @@ #define _CRYPTO_is_ARMv8_SHA256_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA256_capable) #define _CRYPTO_is_ARMv8_SHA512_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA512_capable) #define _CRYPTO_is_AVX2_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX2_capable) +#define _CRYPTO_is_AVX512BW_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX512BW_capable) +#define _CRYPTO_is_AVX512VL_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX512VL_capable) #define _CRYPTO_is_AVX_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX_capable) #define _CRYPTO_is_BMI1_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_BMI1_capable) #define _CRYPTO_is_BMI2_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_BMI2_capable) @@ -692,6 +710,8 @@ #define _CRYPTO_is_RDRAND_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_RDRAND_capable) #define 
_CRYPTO_is_SSE4_1_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_SSE4_1_capable) #define _CRYPTO_is_SSSE3_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_SSSE3_capable) +#define _CRYPTO_is_VAES_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_VAES_capable) +#define _CRYPTO_is_VPCLMULQDQ_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_VPCLMULQDQ_capable) #define _CRYPTO_is_confidential_build BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_confidential_build) #define _CRYPTO_is_intel_cpu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_intel_cpu) #define _CRYPTO_is_x86_SHA_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_x86_SHA_capable) @@ -786,16 +806,6 @@ #define _DH_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_size) #define _DH_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_up_ref) #define _DHparams_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DHparams_dup) -#define _DILITHIUM_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_generate_key) -#define _DILITHIUM_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_generate_key_external_entropy) -#define _DILITHIUM_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_marshal_private_key) -#define _DILITHIUM_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_marshal_public_key) -#define _DILITHIUM_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_parse_private_key) -#define _DILITHIUM_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_parse_public_key) -#define _DILITHIUM_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_public_from_private) -#define _DILITHIUM_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_sign) -#define _DILITHIUM_sign_deterministic BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
DILITHIUM_sign_deterministic) -#define _DILITHIUM_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DILITHIUM_verify) #define _DIRECTORYSTRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIRECTORYSTRING_free) #define _DIRECTORYSTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIRECTORYSTRING_it) #define _DIRECTORYSTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIRECTORYSTRING_new) @@ -1142,6 +1152,7 @@ #define _EVP_MD_CTX_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_create) #define _EVP_MD_CTX_destroy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_destroy) #define _EVP_MD_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_free) +#define _EVP_MD_CTX_get0_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_get0_md) #define _EVP_MD_CTX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_init) #define _EVP_MD_CTX_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_md) #define _EVP_MD_CTX_move BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_move) @@ -1423,17 +1434,13 @@ #define _METHOD_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, METHOD_ref) #define _METHOD_unref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, METHOD_unref) #define _MLDSA65_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_generate_key) -#define _MLDSA65_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_generate_key_external_entropy) -#define _MLDSA65_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_marshal_private_key) #define _MLDSA65_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_marshal_public_key) #define _MLDSA65_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_parse_private_key) #define _MLDSA65_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_parse_public_key) #define _MLDSA65_private_key_from_seed 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_private_key_from_seed) #define _MLDSA65_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_public_from_private) #define _MLDSA65_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_sign) -#define _MLDSA65_sign_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_sign_internal) #define _MLDSA65_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_verify) -#define _MLDSA65_verify_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_verify_internal) #define _MLKEM1024_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_decap) #define _MLKEM1024_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_encap) #define _MLKEM1024_encap_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_encap_external_entropy) @@ -1481,8 +1488,6 @@ #define _NOTICEREF_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NOTICEREF_free) #define _NOTICEREF_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NOTICEREF_it) #define _NOTICEREF_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NOTICEREF_new) -#define _OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER) -#define _OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER) #define _OBJ_cbs2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_cbs2nid) #define _OBJ_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_cleanup) #define _OBJ_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_cmp) @@ -1862,10 +1867,6 @@ #define _SPAKE2_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPAKE2_CTX_new) #define _SPAKE2_generate_msg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SPAKE2_generate_msg) #define _SPAKE2_process_msg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPAKE2_process_msg) -#define _SPX_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPX_generate_key) -#define _SPX_generate_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPX_generate_key_from_seed) -#define _SPX_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPX_sign) -#define _SPX_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPX_verify) #define _SSLeay BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLeay) #define _SSLeay_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLeay_version) #define _TRUST_TOKEN_CLIENT_add_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_add_key) @@ -2368,7 +2369,11 @@ #define _aes256gcmsiv_kdf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_kdf) #define _aes_ctr_set_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_ctr_set_key) #define _aes_gcm_dec_kernel BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_dec_kernel) +#define _aes_gcm_dec_update_vaes_avx10_256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx10_256) +#define _aes_gcm_dec_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx10_512) #define _aes_gcm_enc_kernel BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_enc_kernel) +#define _aes_gcm_enc_update_vaes_avx10_256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx10_256) +#define _aes_gcm_enc_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx10_512) #define _aes_hw_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_cbc_encrypt) #define _aes_hw_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_ctr32_encrypt_blocks) #define _aes_hw_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_decrypt) @@ -2411,13 +2416,10 @@ #define _asn1_type_set0_string 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_type_set0_string) #define _asn1_type_value_as_pointer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_type_value_as_pointer) #define _asn1_utctime_to_tm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_utctime_to_tm) +#define _bcm_as_approved_status BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bcm_as_approved_status) +#define _bcm_success BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bcm_success) #define _beeu_mod_inverse_vartime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, beeu_mod_inverse_vartime) -#define _bio_clear_socket_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_clear_socket_error) #define _bio_errno_should_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_errno_should_retry) -#define _bio_ip_and_port_to_socket_and_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_ip_and_port_to_socket_and_addr) -#define _bio_sock_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_sock_error) -#define _bio_socket_nbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_socket_nbio) -#define _bio_socket_should_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_socket_should_retry) #define _bn_abs_sub_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_abs_sub_consttime) #define _bn_add_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_add_words) #define _bn_assert_fits_in_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_assert_fits_in_bytes) @@ -2780,18 +2782,22 @@ #define _gcm_ghash_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_nohw) #define _gcm_ghash_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_ssse3) #define _gcm_ghash_v8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_v8) +#define _gcm_ghash_vpclmulqdq_avx10_256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx10_256) +#define _gcm_ghash_vpclmulqdq_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx10_512) 
#define _gcm_gmult_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_avx) #define _gcm_gmult_clmul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_clmul) #define _gcm_gmult_neon BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_neon) #define _gcm_gmult_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_nohw) #define _gcm_gmult_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_ssse3) #define _gcm_gmult_v8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_v8) +#define _gcm_gmult_vpclmulqdq_avx10 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_vpclmulqdq_avx10) #define _gcm_init_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_avx) #define _gcm_init_clmul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_clmul) #define _gcm_init_neon BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_neon) #define _gcm_init_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_nohw) #define _gcm_init_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_ssse3) #define _gcm_init_v8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_v8) +#define _gcm_init_vpclmulqdq_avx10 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_vpclmulqdq_avx10) #define _gcm_neon_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_neon_capable) #define _gcm_pmull_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_pmull_capable) #define _have_fast_rdrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, have_fast_rdrand) @@ -3273,39 +3279,6 @@ #define _slhdsa_wots_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_wots_sign) #define _slhdsa_xmss_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_xmss_pk_from_sig) #define _slhdsa_xmss_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_xmss_sign) -#define _spx_base_b BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_base_b) -#define _spx_copy_keypair_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
spx_copy_keypair_addr) -#define _spx_fors_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_fors_pk_from_sig) -#define _spx_fors_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_fors_sign) -#define _spx_fors_sk_gen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_fors_sk_gen) -#define _spx_fors_treehash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_fors_treehash) -#define _spx_get_tree_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_get_tree_index) -#define _spx_ht_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_ht_sign) -#define _spx_ht_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_ht_verify) -#define _spx_set_chain_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_chain_addr) -#define _spx_set_hash_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_hash_addr) -#define _spx_set_keypair_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_keypair_addr) -#define _spx_set_layer_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_layer_addr) -#define _spx_set_tree_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_tree_addr) -#define _spx_set_tree_height BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_tree_height) -#define _spx_set_tree_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_tree_index) -#define _spx_set_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_set_type) -#define _spx_thash_f BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_thash_f) -#define _spx_thash_h BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_thash_h) -#define _spx_thash_hmsg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_thash_hmsg) -#define _spx_thash_prf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_thash_prf) -#define _spx_thash_prfmsg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_thash_prfmsg) -#define _spx_thash_tk BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_thash_tk) -#define _spx_thash_tl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
spx_thash_tl) -#define _spx_to_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_to_uint64) -#define _spx_treehash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_treehash) -#define _spx_uint64_to_len_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_uint64_to_len_bytes) -#define _spx_wots_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_wots_pk_from_sig) -#define _spx_wots_pk_gen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_wots_pk_gen) -#define _spx_wots_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_wots_sign) -#define _spx_xmss_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_xmss_pk_from_sig) -#define _spx_xmss_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, spx_xmss_sign) -#define _swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE) #define _v2i_GENERAL_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v2i_GENERAL_NAME) #define _v2i_GENERAL_NAMES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v2i_GENERAL_NAMES) #define _v2i_GENERAL_NAME_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v2i_GENERAL_NAME_ex) diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_buffer.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_buffer.h index 3c69d42c..f687da37 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_buffer.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_buffer.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. 
+/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bytestring.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bytestring.h index 3e8092a9..dba6114a 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bytestring.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_bytestring.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_chacha.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_chacha.h index 59f4a872..8fe058ea 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_chacha.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_chacha.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cmac.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cmac.h index 355babdd..6994e8bf 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cmac.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cmac.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. 
+/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cpu.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cpu.h index d4226e02..441dc734 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cpu.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_cpu.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_crypto.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_crypto.h index 092cd4c6..0b27bae4 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_crypto.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_crypto.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ctrdrbg.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ctrdrbg.h index 9c15b5ae..8b1242e8 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ctrdrbg.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ctrdrbg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. 
+/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_curve25519.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_curve25519.h index c1cf4e82..7f647f99 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_curve25519.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_curve25519.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_digest.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_digest.h index 99b50d85..af6c8005 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_digest.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_digest.h @@ -226,8 +226,13 @@ OPENSSL_EXPORT size_t EVP_MD_block_size(const EVP_MD *md); // Digest operation accessors. +// EVP_MD_CTX_get0_md returns the underlying digest function, or NULL if one has +// not been set. +OPENSSL_EXPORT const EVP_MD *EVP_MD_CTX_get0_md(const EVP_MD_CTX *ctx); + // EVP_MD_CTX_md returns the underlying digest function, or NULL if one has not -// been set. +// been set. (This is the same as |EVP_MD_CTX_get0_md| but OpenSSL has +// deprecated this spelling.) OPENSSL_EXPORT const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx); // EVP_MD_CTX_size returns the digest size of |ctx|, in bytes. It diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_e_os2.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_e_os2.h index 97a94668..4c372cfa 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_e_os2.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_e_os2.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. 
+/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_engine.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_engine.h index bf74ca10..b12e0215 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_engine.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_engine.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hkdf.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hkdf.h index 76c8eaac..c08a7a80 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hkdf.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hkdf.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hpke.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hpke.h index caf048c0..a609f8ac 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hpke.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hpke.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. 
+/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hrss.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hrss.h index 71daed31..bc02c26a 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hrss.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_hrss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, Google Inc. +/* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_is_boringssl.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_is_boringssl.h index 302cbe29..ee9c95c2 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_is_boringssl.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_is_boringssl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. +/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_kdf.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_kdf.h index b0f94c67..606e1c1b 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_kdf.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_kdf.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. 
+/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mldsa.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mldsa.h index a99e6434..3025870b 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mldsa.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mldsa.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mlkem.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mlkem.h index 4472aa39..5c94a57d 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mlkem.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_mlkem.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_obj_mac.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_obj_mac.h index f27dd59a..3d5c7e25 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_obj_mac.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_obj_mac.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. 
+/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_objects.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_objects.h index 4ddc3076..b1806e00 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_objects.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_objects.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslconf.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslconf.h index feb9246c..cfbfb4d8 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslconf.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslconf.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslv.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslv.h index 1eeebbdb..524a6f17 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslv.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_opensslv.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ossl_typ.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ossl_typ.h index 1d8f6e66..2c164cd9 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ossl_typ.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_ossl_typ.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs12.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs12.h index 1284dcca..a71999df 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs12.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs12.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs7.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs7.h index 6d1325b0..f36c4802 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs7.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs7.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs8.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs8.h index 4c9615c5..1c09ed72 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs8.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pkcs8.h @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_poly1305.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_poly1305.h index f48d2e2a..41adce22 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_poly1305.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_poly1305.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pool.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pool.h index de07f858..aed6b083 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pool.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_pool.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, Google Inc. 
+/* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_posix_time.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_posix_time.h index 740096fc..8e7aecdf 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_posix_time.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_posix_time.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, Google Inc. +/* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_rand.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_rand.h index 5ca7fe94..c229a979 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_rand.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_rand.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_safestack.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_safestack.h index 6e5e4330..28090d98 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_safestack.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_safestack.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. 
+/* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_siphash.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_siphash.h index b851401b..d015754c 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_siphash.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_siphash.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, Google Inc. +/* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_slhdsa.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_slhdsa.h index a79fa512..694ef9d8 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_slhdsa.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_slhdsa.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google LLC +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_span.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_span.h index af6f0504..dbe5b774 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_span.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_span.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Google Inc. 
+/* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_target.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_target.h index 2760f52c..71ce6034 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_target.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_target.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_time.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_time.h index 4f879661..11ab346c 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_time.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_time.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2024, Google Inc. +/* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_trust_token.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_trust_token.h index 330b18bd..6c24c74a 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_trust_token.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_trust_token.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020, Google Inc. 
+/* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509_vfy.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509_vfy.h index 65ce4829..2a99c33b 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509_vfy.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509_vfy.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Google Inc. +/* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3.h index 3501b90a..8ccde132 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. +/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3_errors.h b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3_errors.h index 293d268d..6806d8a2 100644 --- a/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3_errors.h +++ b/Sources/CCryptoBoringSSL/include/CCryptoBoringSSL_x509v3_errors.h @@ -1,5 +1,5 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. */ +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project + * 1999. */ /* ==================================================================== * Copyright (c) 1999-2004 The OpenSSL Project. All rights reserved. 
* diff --git a/Sources/CCryptoBoringSSL/include/boringssl_prefix_symbols_nasm.inc b/Sources/CCryptoBoringSSL/include/boringssl_prefix_symbols_nasm.inc index 9e089b92..65591b66 100644 --- a/Sources/CCryptoBoringSSL/include/boringssl_prefix_symbols_nasm.inc +++ b/Sources/CCryptoBoringSSL/include/boringssl_prefix_symbols_nasm.inc @@ -1,4 +1,4 @@ -; Copyright (c) 2018, Google Inc. +; Copyright 2018 The BoringSSL Authors ; ; Permission to use, copy, modify, and/or distribute this software for any ; purpose with or without fee is hereby granted, provided that the above @@ -205,6 +205,30 @@ %xdefine _BASIC_CONSTRAINTS_it _ %+ BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_it %xdefine _BASIC_CONSTRAINTS_new _ %+ BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_new %xdefine _BCM_fips_186_2_prf _ %+ BORINGSSL_PREFIX %+ _BCM_fips_186_2_prf +%xdefine _BCM_mldsa65_generate_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key +%xdefine _BCM_mldsa65_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key_external_entropy +%xdefine _BCM_mldsa65_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_private_key +%xdefine _BCM_mldsa65_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_public_key +%xdefine _BCM_mldsa65_parse_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_private_key +%xdefine _BCM_mldsa65_parse_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_public_key +%xdefine _BCM_mldsa65_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_private_key_from_seed +%xdefine _BCM_mldsa65_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_public_from_private +%xdefine _BCM_mldsa65_sign _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_sign +%xdefine _BCM_mldsa65_sign_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_sign_internal +%xdefine _BCM_mldsa65_verify _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_verify +%xdefine _BCM_mldsa65_verify_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_verify_internal +%xdefine _BCM_mldsa87_generate_key _ %+ 
BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key +%xdefine _BCM_mldsa87_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key_external_entropy +%xdefine _BCM_mldsa87_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_private_key +%xdefine _BCM_mldsa87_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_public_key +%xdefine _BCM_mldsa87_parse_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_private_key +%xdefine _BCM_mldsa87_parse_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_public_key +%xdefine _BCM_mldsa87_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_private_key_from_seed +%xdefine _BCM_mldsa87_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_public_from_private +%xdefine _BCM_mldsa87_sign _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_sign +%xdefine _BCM_mldsa87_sign_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_sign_internal +%xdefine _BCM_mldsa87_verify _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_verify +%xdefine _BCM_mldsa87_verify_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_verify_internal %xdefine _BCM_rand_bytes _ %+ BORINGSSL_PREFIX %+ _BCM_rand_bytes %xdefine _BCM_rand_bytes_hwrng _ %+ BORINGSSL_PREFIX %+ _BCM_rand_bytes_hwrng %xdefine _BCM_rand_bytes_with_additional_data _ %+ BORINGSSL_PREFIX %+ _BCM_rand_bytes_with_additional_data @@ -239,7 +263,6 @@ %xdefine _BIO_ctrl_get_read_request _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl_get_read_request %xdefine _BIO_ctrl_get_write_guarantee _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl_get_write_guarantee %xdefine _BIO_ctrl_pending _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl_pending -%xdefine _BIO_do_connect _ %+ BORINGSSL_PREFIX %+ _BIO_do_connect %xdefine _BIO_eof _ %+ BORINGSSL_PREFIX %+ _BIO_eof %xdefine _BIO_find_type _ %+ BORINGSSL_PREFIX %+ _BIO_find_type %xdefine _BIO_flush _ %+ BORINGSSL_PREFIX %+ _BIO_flush @@ -274,12 +297,10 @@ %xdefine _BIO_method_type _ %+ BORINGSSL_PREFIX %+ _BIO_method_type %xdefine _BIO_new _ %+ BORINGSSL_PREFIX %+ _BIO_new 
%xdefine _BIO_new_bio_pair _ %+ BORINGSSL_PREFIX %+ _BIO_new_bio_pair -%xdefine _BIO_new_connect _ %+ BORINGSSL_PREFIX %+ _BIO_new_connect %xdefine _BIO_new_fd _ %+ BORINGSSL_PREFIX %+ _BIO_new_fd %xdefine _BIO_new_file _ %+ BORINGSSL_PREFIX %+ _BIO_new_file %xdefine _BIO_new_fp _ %+ BORINGSSL_PREFIX %+ _BIO_new_fp %xdefine _BIO_new_mem_buf _ %+ BORINGSSL_PREFIX %+ _BIO_new_mem_buf -%xdefine _BIO_new_socket _ %+ BORINGSSL_PREFIX %+ _BIO_new_socket %xdefine _BIO_next _ %+ BORINGSSL_PREFIX %+ _BIO_next %xdefine _BIO_number_read _ %+ BORINGSSL_PREFIX %+ _BIO_number_read %xdefine _BIO_number_written _ %+ BORINGSSL_PREFIX %+ _BIO_number_written @@ -294,16 +315,11 @@ %xdefine _BIO_read_filename _ %+ BORINGSSL_PREFIX %+ _BIO_read_filename %xdefine _BIO_reset _ %+ BORINGSSL_PREFIX %+ _BIO_reset %xdefine _BIO_rw_filename _ %+ BORINGSSL_PREFIX %+ _BIO_rw_filename -%xdefine _BIO_s_connect _ %+ BORINGSSL_PREFIX %+ _BIO_s_connect %xdefine _BIO_s_fd _ %+ BORINGSSL_PREFIX %+ _BIO_s_fd %xdefine _BIO_s_file _ %+ BORINGSSL_PREFIX %+ _BIO_s_file %xdefine _BIO_s_mem _ %+ BORINGSSL_PREFIX %+ _BIO_s_mem -%xdefine _BIO_s_socket _ %+ BORINGSSL_PREFIX %+ _BIO_s_socket %xdefine _BIO_seek _ %+ BORINGSSL_PREFIX %+ _BIO_seek %xdefine _BIO_set_close _ %+ BORINGSSL_PREFIX %+ _BIO_set_close -%xdefine _BIO_set_conn_hostname _ %+ BORINGSSL_PREFIX %+ _BIO_set_conn_hostname -%xdefine _BIO_set_conn_int_port _ %+ BORINGSSL_PREFIX %+ _BIO_set_conn_int_port -%xdefine _BIO_set_conn_port _ %+ BORINGSSL_PREFIX %+ _BIO_set_conn_port %xdefine _BIO_set_data _ %+ BORINGSSL_PREFIX %+ _BIO_set_data %xdefine _BIO_set_ex_data _ %+ BORINGSSL_PREFIX %+ _BIO_set_ex_data %xdefine _BIO_set_fd _ %+ BORINGSSL_PREFIX %+ _BIO_set_fd @@ -312,7 +328,6 @@ %xdefine _BIO_set_init _ %+ BORINGSSL_PREFIX %+ _BIO_set_init %xdefine _BIO_set_mem_buf _ %+ BORINGSSL_PREFIX %+ _BIO_set_mem_buf %xdefine _BIO_set_mem_eof_return _ %+ BORINGSSL_PREFIX %+ _BIO_set_mem_eof_return -%xdefine _BIO_set_nbio _ %+ BORINGSSL_PREFIX %+ _BIO_set_nbio 
%xdefine _BIO_set_retry_read _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_read %xdefine _BIO_set_retry_reason _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_reason %xdefine _BIO_set_retry_special _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_special @@ -636,6 +651,7 @@ %xdefine _CRYPTO_cfb128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cfb128_encrypt %xdefine _CRYPTO_chacha_20 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_chacha_20 %xdefine _CRYPTO_cleanup_all_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cleanup_all_ex_data +%xdefine _CRYPTO_cpu_avoid_zmm_registers _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cpu_avoid_zmm_registers %xdefine _CRYPTO_cpu_perf_is_like_silvermont _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cpu_perf_is_like_silvermont %xdefine _CRYPTO_ctr128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt %xdefine _CRYPTO_ctr128_encrypt_ctr32 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt_ctr32 @@ -674,6 +690,8 @@ %xdefine _CRYPTO_is_ARMv8_SHA256_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA256_capable %xdefine _CRYPTO_is_ARMv8_SHA512_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA512_capable %xdefine _CRYPTO_is_AVX2_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX2_capable +%xdefine _CRYPTO_is_AVX512BW_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512BW_capable +%xdefine _CRYPTO_is_AVX512VL_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512VL_capable %xdefine _CRYPTO_is_AVX_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX_capable %xdefine _CRYPTO_is_BMI1_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_BMI1_capable %xdefine _CRYPTO_is_BMI2_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_BMI2_capable @@ -684,6 +702,8 @@ %xdefine _CRYPTO_is_RDRAND_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_RDRAND_capable %xdefine _CRYPTO_is_SSE4_1_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_SSE4_1_capable %xdefine _CRYPTO_is_SSSE3_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_SSSE3_capable +%xdefine _CRYPTO_is_VAES_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_VAES_capable +%xdefine _CRYPTO_is_VPCLMULQDQ_capable _ %+ 
BORINGSSL_PREFIX %+ _CRYPTO_is_VPCLMULQDQ_capable %xdefine _CRYPTO_is_confidential_build _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_confidential_build %xdefine _CRYPTO_is_intel_cpu _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_intel_cpu %xdefine _CRYPTO_is_x86_SHA_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_x86_SHA_capable @@ -778,16 +798,6 @@ %xdefine _DH_size _ %+ BORINGSSL_PREFIX %+ _DH_size %xdefine _DH_up_ref _ %+ BORINGSSL_PREFIX %+ _DH_up_ref %xdefine _DHparams_dup _ %+ BORINGSSL_PREFIX %+ _DHparams_dup -%xdefine _DILITHIUM_generate_key _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_generate_key -%xdefine _DILITHIUM_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_generate_key_external_entropy -%xdefine _DILITHIUM_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_marshal_private_key -%xdefine _DILITHIUM_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_marshal_public_key -%xdefine _DILITHIUM_parse_private_key _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_parse_private_key -%xdefine _DILITHIUM_parse_public_key _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_parse_public_key -%xdefine _DILITHIUM_public_from_private _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_public_from_private -%xdefine _DILITHIUM_sign _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_sign -%xdefine _DILITHIUM_sign_deterministic _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_sign_deterministic -%xdefine _DILITHIUM_verify _ %+ BORINGSSL_PREFIX %+ _DILITHIUM_verify %xdefine _DIRECTORYSTRING_free _ %+ BORINGSSL_PREFIX %+ _DIRECTORYSTRING_free %xdefine _DIRECTORYSTRING_it _ %+ BORINGSSL_PREFIX %+ _DIRECTORYSTRING_it %xdefine _DIRECTORYSTRING_new _ %+ BORINGSSL_PREFIX %+ _DIRECTORYSTRING_new @@ -1134,6 +1144,7 @@ %xdefine _EVP_MD_CTX_create _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_create %xdefine _EVP_MD_CTX_destroy _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_destroy %xdefine _EVP_MD_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_free +%xdefine _EVP_MD_CTX_get0_md _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_get0_md %xdefine _EVP_MD_CTX_init _ %+ BORINGSSL_PREFIX %+ 
_EVP_MD_CTX_init %xdefine _EVP_MD_CTX_md _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_md %xdefine _EVP_MD_CTX_move _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_move @@ -1415,17 +1426,13 @@ %xdefine _METHOD_ref _ %+ BORINGSSL_PREFIX %+ _METHOD_ref %xdefine _METHOD_unref _ %+ BORINGSSL_PREFIX %+ _METHOD_unref %xdefine _MLDSA65_generate_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_generate_key -%xdefine _MLDSA65_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _MLDSA65_generate_key_external_entropy -%xdefine _MLDSA65_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_marshal_private_key %xdefine _MLDSA65_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_marshal_public_key %xdefine _MLDSA65_parse_private_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_parse_private_key %xdefine _MLDSA65_parse_public_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_parse_public_key %xdefine _MLDSA65_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _MLDSA65_private_key_from_seed %xdefine _MLDSA65_public_from_private _ %+ BORINGSSL_PREFIX %+ _MLDSA65_public_from_private %xdefine _MLDSA65_sign _ %+ BORINGSSL_PREFIX %+ _MLDSA65_sign -%xdefine _MLDSA65_sign_internal _ %+ BORINGSSL_PREFIX %+ _MLDSA65_sign_internal %xdefine _MLDSA65_verify _ %+ BORINGSSL_PREFIX %+ _MLDSA65_verify -%xdefine _MLDSA65_verify_internal _ %+ BORINGSSL_PREFIX %+ _MLDSA65_verify_internal %xdefine _MLKEM1024_decap _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_decap %xdefine _MLKEM1024_encap _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_encap %xdefine _MLKEM1024_encap_external_entropy _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_encap_external_entropy @@ -1473,8 +1480,6 @@ %xdefine _NOTICEREF_free _ %+ BORINGSSL_PREFIX %+ _NOTICEREF_free %xdefine _NOTICEREF_it _ %+ BORINGSSL_PREFIX %+ _NOTICEREF_it %xdefine _NOTICEREF_new _ %+ BORINGSSL_PREFIX %+ _NOTICEREF_new -%xdefine _OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER _ %+ BORINGSSL_PREFIX %+ _OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER -%xdefine 
_OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER _ %+ BORINGSSL_PREFIX %+ _OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER %xdefine _OBJ_cbs2nid _ %+ BORINGSSL_PREFIX %+ _OBJ_cbs2nid %xdefine _OBJ_cleanup _ %+ BORINGSSL_PREFIX %+ _OBJ_cleanup %xdefine _OBJ_cmp _ %+ BORINGSSL_PREFIX %+ _OBJ_cmp @@ -1854,10 +1859,6 @@ %xdefine _SPAKE2_CTX_new _ %+ BORINGSSL_PREFIX %+ _SPAKE2_CTX_new %xdefine _SPAKE2_generate_msg _ %+ BORINGSSL_PREFIX %+ _SPAKE2_generate_msg %xdefine _SPAKE2_process_msg _ %+ BORINGSSL_PREFIX %+ _SPAKE2_process_msg -%xdefine _SPX_generate_key _ %+ BORINGSSL_PREFIX %+ _SPX_generate_key -%xdefine _SPX_generate_key_from_seed _ %+ BORINGSSL_PREFIX %+ _SPX_generate_key_from_seed -%xdefine _SPX_sign _ %+ BORINGSSL_PREFIX %+ _SPX_sign -%xdefine _SPX_verify _ %+ BORINGSSL_PREFIX %+ _SPX_verify %xdefine _SSLeay _ %+ BORINGSSL_PREFIX %+ _SSLeay %xdefine _SSLeay_version _ %+ BORINGSSL_PREFIX %+ _SSLeay_version %xdefine _TRUST_TOKEN_CLIENT_add_key _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_add_key @@ -2360,7 +2361,11 @@ %xdefine _aes256gcmsiv_kdf _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_kdf %xdefine _aes_ctr_set_key _ %+ BORINGSSL_PREFIX %+ _aes_ctr_set_key %xdefine _aes_gcm_dec_kernel _ %+ BORINGSSL_PREFIX %+ _aes_gcm_dec_kernel +%xdefine _aes_gcm_dec_update_vaes_avx10_256 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx10_256 +%xdefine _aes_gcm_dec_update_vaes_avx10_512 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx10_512 %xdefine _aes_gcm_enc_kernel _ %+ BORINGSSL_PREFIX %+ _aes_gcm_enc_kernel +%xdefine _aes_gcm_enc_update_vaes_avx10_256 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx10_256 +%xdefine _aes_gcm_enc_update_vaes_avx10_512 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx10_512 %xdefine _aes_hw_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _aes_hw_cbc_encrypt %xdefine _aes_hw_ctr32_encrypt_blocks _ %+ BORINGSSL_PREFIX %+ _aes_hw_ctr32_encrypt_blocks %xdefine _aes_hw_decrypt _ %+ 
BORINGSSL_PREFIX %+ _aes_hw_decrypt @@ -2403,13 +2408,10 @@ %xdefine _asn1_type_set0_string _ %+ BORINGSSL_PREFIX %+ _asn1_type_set0_string %xdefine _asn1_type_value_as_pointer _ %+ BORINGSSL_PREFIX %+ _asn1_type_value_as_pointer %xdefine _asn1_utctime_to_tm _ %+ BORINGSSL_PREFIX %+ _asn1_utctime_to_tm +%xdefine _bcm_as_approved_status _ %+ BORINGSSL_PREFIX %+ _bcm_as_approved_status +%xdefine _bcm_success _ %+ BORINGSSL_PREFIX %+ _bcm_success %xdefine _beeu_mod_inverse_vartime _ %+ BORINGSSL_PREFIX %+ _beeu_mod_inverse_vartime -%xdefine _bio_clear_socket_error _ %+ BORINGSSL_PREFIX %+ _bio_clear_socket_error %xdefine _bio_errno_should_retry _ %+ BORINGSSL_PREFIX %+ _bio_errno_should_retry -%xdefine _bio_ip_and_port_to_socket_and_addr _ %+ BORINGSSL_PREFIX %+ _bio_ip_and_port_to_socket_and_addr -%xdefine _bio_sock_error _ %+ BORINGSSL_PREFIX %+ _bio_sock_error -%xdefine _bio_socket_nbio _ %+ BORINGSSL_PREFIX %+ _bio_socket_nbio -%xdefine _bio_socket_should_retry _ %+ BORINGSSL_PREFIX %+ _bio_socket_should_retry %xdefine _bn_abs_sub_consttime _ %+ BORINGSSL_PREFIX %+ _bn_abs_sub_consttime %xdefine _bn_add_words _ %+ BORINGSSL_PREFIX %+ _bn_add_words %xdefine _bn_assert_fits_in_bytes _ %+ BORINGSSL_PREFIX %+ _bn_assert_fits_in_bytes @@ -2772,18 +2774,22 @@ %xdefine _gcm_ghash_nohw _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_nohw %xdefine _gcm_ghash_ssse3 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_ssse3 %xdefine _gcm_ghash_v8 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_v8 +%xdefine _gcm_ghash_vpclmulqdq_avx10_256 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx10_256 +%xdefine _gcm_ghash_vpclmulqdq_avx10_512 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx10_512 %xdefine _gcm_gmult_avx _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_avx %xdefine _gcm_gmult_clmul _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_clmul %xdefine _gcm_gmult_neon _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_neon %xdefine _gcm_gmult_nohw _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_nohw %xdefine _gcm_gmult_ssse3 _ %+ BORINGSSL_PREFIX %+ 
_gcm_gmult_ssse3 %xdefine _gcm_gmult_v8 _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_v8 +%xdefine _gcm_gmult_vpclmulqdq_avx10 _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_vpclmulqdq_avx10 %xdefine _gcm_init_avx _ %+ BORINGSSL_PREFIX %+ _gcm_init_avx %xdefine _gcm_init_clmul _ %+ BORINGSSL_PREFIX %+ _gcm_init_clmul %xdefine _gcm_init_neon _ %+ BORINGSSL_PREFIX %+ _gcm_init_neon %xdefine _gcm_init_nohw _ %+ BORINGSSL_PREFIX %+ _gcm_init_nohw %xdefine _gcm_init_ssse3 _ %+ BORINGSSL_PREFIX %+ _gcm_init_ssse3 %xdefine _gcm_init_v8 _ %+ BORINGSSL_PREFIX %+ _gcm_init_v8 +%xdefine _gcm_init_vpclmulqdq_avx10 _ %+ BORINGSSL_PREFIX %+ _gcm_init_vpclmulqdq_avx10 %xdefine _gcm_neon_capable _ %+ BORINGSSL_PREFIX %+ _gcm_neon_capable %xdefine _gcm_pmull_capable _ %+ BORINGSSL_PREFIX %+ _gcm_pmull_capable %xdefine _have_fast_rdrand _ %+ BORINGSSL_PREFIX %+ _have_fast_rdrand @@ -3265,39 +3271,6 @@ %xdefine _slhdsa_wots_sign _ %+ BORINGSSL_PREFIX %+ _slhdsa_wots_sign %xdefine _slhdsa_xmss_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _slhdsa_xmss_pk_from_sig %xdefine _slhdsa_xmss_sign _ %+ BORINGSSL_PREFIX %+ _slhdsa_xmss_sign -%xdefine _spx_base_b _ %+ BORINGSSL_PREFIX %+ _spx_base_b -%xdefine _spx_copy_keypair_addr _ %+ BORINGSSL_PREFIX %+ _spx_copy_keypair_addr -%xdefine _spx_fors_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _spx_fors_pk_from_sig -%xdefine _spx_fors_sign _ %+ BORINGSSL_PREFIX %+ _spx_fors_sign -%xdefine _spx_fors_sk_gen _ %+ BORINGSSL_PREFIX %+ _spx_fors_sk_gen -%xdefine _spx_fors_treehash _ %+ BORINGSSL_PREFIX %+ _spx_fors_treehash -%xdefine _spx_get_tree_index _ %+ BORINGSSL_PREFIX %+ _spx_get_tree_index -%xdefine _spx_ht_sign _ %+ BORINGSSL_PREFIX %+ _spx_ht_sign -%xdefine _spx_ht_verify _ %+ BORINGSSL_PREFIX %+ _spx_ht_verify -%xdefine _spx_set_chain_addr _ %+ BORINGSSL_PREFIX %+ _spx_set_chain_addr -%xdefine _spx_set_hash_addr _ %+ BORINGSSL_PREFIX %+ _spx_set_hash_addr -%xdefine _spx_set_keypair_addr _ %+ BORINGSSL_PREFIX %+ _spx_set_keypair_addr -%xdefine _spx_set_layer_addr _ %+ 
BORINGSSL_PREFIX %+ _spx_set_layer_addr -%xdefine _spx_set_tree_addr _ %+ BORINGSSL_PREFIX %+ _spx_set_tree_addr -%xdefine _spx_set_tree_height _ %+ BORINGSSL_PREFIX %+ _spx_set_tree_height -%xdefine _spx_set_tree_index _ %+ BORINGSSL_PREFIX %+ _spx_set_tree_index -%xdefine _spx_set_type _ %+ BORINGSSL_PREFIX %+ _spx_set_type -%xdefine _spx_thash_f _ %+ BORINGSSL_PREFIX %+ _spx_thash_f -%xdefine _spx_thash_h _ %+ BORINGSSL_PREFIX %+ _spx_thash_h -%xdefine _spx_thash_hmsg _ %+ BORINGSSL_PREFIX %+ _spx_thash_hmsg -%xdefine _spx_thash_prf _ %+ BORINGSSL_PREFIX %+ _spx_thash_prf -%xdefine _spx_thash_prfmsg _ %+ BORINGSSL_PREFIX %+ _spx_thash_prfmsg -%xdefine _spx_thash_tk _ %+ BORINGSSL_PREFIX %+ _spx_thash_tk -%xdefine _spx_thash_tl _ %+ BORINGSSL_PREFIX %+ _spx_thash_tl -%xdefine _spx_to_uint64 _ %+ BORINGSSL_PREFIX %+ _spx_to_uint64 -%xdefine _spx_treehash _ %+ BORINGSSL_PREFIX %+ _spx_treehash -%xdefine _spx_uint64_to_len_bytes _ %+ BORINGSSL_PREFIX %+ _spx_uint64_to_len_bytes -%xdefine _spx_wots_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _spx_wots_pk_from_sig -%xdefine _spx_wots_pk_gen _ %+ BORINGSSL_PREFIX %+ _spx_wots_pk_gen -%xdefine _spx_wots_sign _ %+ BORINGSSL_PREFIX %+ _spx_wots_sign -%xdefine _spx_xmss_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _spx_xmss_pk_from_sig -%xdefine _spx_xmss_sign _ %+ BORINGSSL_PREFIX %+ _spx_xmss_sign -%xdefine _swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE _ %+ BORINGSSL_PREFIX %+ _swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE %xdefine _v2i_GENERAL_NAME _ %+ BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME %xdefine _v2i_GENERAL_NAMES _ %+ BORINGSSL_PREFIX %+ _v2i_GENERAL_NAMES %xdefine _v2i_GENERAL_NAME_ex _ %+ BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME_ex @@ -3570,6 +3543,30 @@ %xdefine BASIC_CONSTRAINTS_it BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_it %xdefine BASIC_CONSTRAINTS_new BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_new %xdefine BCM_fips_186_2_prf BORINGSSL_PREFIX %+ _BCM_fips_186_2_prf +%xdefine BCM_mldsa65_generate_key BORINGSSL_PREFIX %+ 
_BCM_mldsa65_generate_key +%xdefine BCM_mldsa65_generate_key_external_entropy BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key_external_entropy +%xdefine BCM_mldsa65_marshal_private_key BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_private_key +%xdefine BCM_mldsa65_marshal_public_key BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_public_key +%xdefine BCM_mldsa65_parse_private_key BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_private_key +%xdefine BCM_mldsa65_parse_public_key BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_public_key +%xdefine BCM_mldsa65_private_key_from_seed BORINGSSL_PREFIX %+ _BCM_mldsa65_private_key_from_seed +%xdefine BCM_mldsa65_public_from_private BORINGSSL_PREFIX %+ _BCM_mldsa65_public_from_private +%xdefine BCM_mldsa65_sign BORINGSSL_PREFIX %+ _BCM_mldsa65_sign +%xdefine BCM_mldsa65_sign_internal BORINGSSL_PREFIX %+ _BCM_mldsa65_sign_internal +%xdefine BCM_mldsa65_verify BORINGSSL_PREFIX %+ _BCM_mldsa65_verify +%xdefine BCM_mldsa65_verify_internal BORINGSSL_PREFIX %+ _BCM_mldsa65_verify_internal +%xdefine BCM_mldsa87_generate_key BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key +%xdefine BCM_mldsa87_generate_key_external_entropy BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key_external_entropy +%xdefine BCM_mldsa87_marshal_private_key BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_private_key +%xdefine BCM_mldsa87_marshal_public_key BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_public_key +%xdefine BCM_mldsa87_parse_private_key BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_private_key +%xdefine BCM_mldsa87_parse_public_key BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_public_key +%xdefine BCM_mldsa87_private_key_from_seed BORINGSSL_PREFIX %+ _BCM_mldsa87_private_key_from_seed +%xdefine BCM_mldsa87_public_from_private BORINGSSL_PREFIX %+ _BCM_mldsa87_public_from_private +%xdefine BCM_mldsa87_sign BORINGSSL_PREFIX %+ _BCM_mldsa87_sign +%xdefine BCM_mldsa87_sign_internal BORINGSSL_PREFIX %+ _BCM_mldsa87_sign_internal +%xdefine BCM_mldsa87_verify BORINGSSL_PREFIX %+ _BCM_mldsa87_verify +%xdefine 
BCM_mldsa87_verify_internal BORINGSSL_PREFIX %+ _BCM_mldsa87_verify_internal %xdefine BCM_rand_bytes BORINGSSL_PREFIX %+ _BCM_rand_bytes %xdefine BCM_rand_bytes_hwrng BORINGSSL_PREFIX %+ _BCM_rand_bytes_hwrng %xdefine BCM_rand_bytes_with_additional_data BORINGSSL_PREFIX %+ _BCM_rand_bytes_with_additional_data @@ -3604,7 +3601,6 @@ %xdefine BIO_ctrl_get_read_request BORINGSSL_PREFIX %+ _BIO_ctrl_get_read_request %xdefine BIO_ctrl_get_write_guarantee BORINGSSL_PREFIX %+ _BIO_ctrl_get_write_guarantee %xdefine BIO_ctrl_pending BORINGSSL_PREFIX %+ _BIO_ctrl_pending -%xdefine BIO_do_connect BORINGSSL_PREFIX %+ _BIO_do_connect %xdefine BIO_eof BORINGSSL_PREFIX %+ _BIO_eof %xdefine BIO_find_type BORINGSSL_PREFIX %+ _BIO_find_type %xdefine BIO_flush BORINGSSL_PREFIX %+ _BIO_flush @@ -3639,12 +3635,10 @@ %xdefine BIO_method_type BORINGSSL_PREFIX %+ _BIO_method_type %xdefine BIO_new BORINGSSL_PREFIX %+ _BIO_new %xdefine BIO_new_bio_pair BORINGSSL_PREFIX %+ _BIO_new_bio_pair -%xdefine BIO_new_connect BORINGSSL_PREFIX %+ _BIO_new_connect %xdefine BIO_new_fd BORINGSSL_PREFIX %+ _BIO_new_fd %xdefine BIO_new_file BORINGSSL_PREFIX %+ _BIO_new_file %xdefine BIO_new_fp BORINGSSL_PREFIX %+ _BIO_new_fp %xdefine BIO_new_mem_buf BORINGSSL_PREFIX %+ _BIO_new_mem_buf -%xdefine BIO_new_socket BORINGSSL_PREFIX %+ _BIO_new_socket %xdefine BIO_next BORINGSSL_PREFIX %+ _BIO_next %xdefine BIO_number_read BORINGSSL_PREFIX %+ _BIO_number_read %xdefine BIO_number_written BORINGSSL_PREFIX %+ _BIO_number_written @@ -3659,16 +3653,11 @@ %xdefine BIO_read_filename BORINGSSL_PREFIX %+ _BIO_read_filename %xdefine BIO_reset BORINGSSL_PREFIX %+ _BIO_reset %xdefine BIO_rw_filename BORINGSSL_PREFIX %+ _BIO_rw_filename -%xdefine BIO_s_connect BORINGSSL_PREFIX %+ _BIO_s_connect %xdefine BIO_s_fd BORINGSSL_PREFIX %+ _BIO_s_fd %xdefine BIO_s_file BORINGSSL_PREFIX %+ _BIO_s_file %xdefine BIO_s_mem BORINGSSL_PREFIX %+ _BIO_s_mem -%xdefine BIO_s_socket BORINGSSL_PREFIX %+ _BIO_s_socket %xdefine BIO_seek 
BORINGSSL_PREFIX %+ _BIO_seek %xdefine BIO_set_close BORINGSSL_PREFIX %+ _BIO_set_close -%xdefine BIO_set_conn_hostname BORINGSSL_PREFIX %+ _BIO_set_conn_hostname -%xdefine BIO_set_conn_int_port BORINGSSL_PREFIX %+ _BIO_set_conn_int_port -%xdefine BIO_set_conn_port BORINGSSL_PREFIX %+ _BIO_set_conn_port %xdefine BIO_set_data BORINGSSL_PREFIX %+ _BIO_set_data %xdefine BIO_set_ex_data BORINGSSL_PREFIX %+ _BIO_set_ex_data %xdefine BIO_set_fd BORINGSSL_PREFIX %+ _BIO_set_fd @@ -3677,7 +3666,6 @@ %xdefine BIO_set_init BORINGSSL_PREFIX %+ _BIO_set_init %xdefine BIO_set_mem_buf BORINGSSL_PREFIX %+ _BIO_set_mem_buf %xdefine BIO_set_mem_eof_return BORINGSSL_PREFIX %+ _BIO_set_mem_eof_return -%xdefine BIO_set_nbio BORINGSSL_PREFIX %+ _BIO_set_nbio %xdefine BIO_set_retry_read BORINGSSL_PREFIX %+ _BIO_set_retry_read %xdefine BIO_set_retry_reason BORINGSSL_PREFIX %+ _BIO_set_retry_reason %xdefine BIO_set_retry_special BORINGSSL_PREFIX %+ _BIO_set_retry_special @@ -4001,6 +3989,7 @@ %xdefine CRYPTO_cfb128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_cfb128_encrypt %xdefine CRYPTO_chacha_20 BORINGSSL_PREFIX %+ _CRYPTO_chacha_20 %xdefine CRYPTO_cleanup_all_ex_data BORINGSSL_PREFIX %+ _CRYPTO_cleanup_all_ex_data +%xdefine CRYPTO_cpu_avoid_zmm_registers BORINGSSL_PREFIX %+ _CRYPTO_cpu_avoid_zmm_registers %xdefine CRYPTO_cpu_perf_is_like_silvermont BORINGSSL_PREFIX %+ _CRYPTO_cpu_perf_is_like_silvermont %xdefine CRYPTO_ctr128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt %xdefine CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt_ctr32 @@ -4039,6 +4028,8 @@ %xdefine CRYPTO_is_ARMv8_SHA256_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA256_capable %xdefine CRYPTO_is_ARMv8_SHA512_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA512_capable %xdefine CRYPTO_is_AVX2_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX2_capable +%xdefine CRYPTO_is_AVX512BW_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512BW_capable +%xdefine CRYPTO_is_AVX512VL_capable BORINGSSL_PREFIX %+ 
_CRYPTO_is_AVX512VL_capable %xdefine CRYPTO_is_AVX_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX_capable %xdefine CRYPTO_is_BMI1_capable BORINGSSL_PREFIX %+ _CRYPTO_is_BMI1_capable %xdefine CRYPTO_is_BMI2_capable BORINGSSL_PREFIX %+ _CRYPTO_is_BMI2_capable @@ -4049,6 +4040,8 @@ %xdefine CRYPTO_is_RDRAND_capable BORINGSSL_PREFIX %+ _CRYPTO_is_RDRAND_capable %xdefine CRYPTO_is_SSE4_1_capable BORINGSSL_PREFIX %+ _CRYPTO_is_SSE4_1_capable %xdefine CRYPTO_is_SSSE3_capable BORINGSSL_PREFIX %+ _CRYPTO_is_SSSE3_capable +%xdefine CRYPTO_is_VAES_capable BORINGSSL_PREFIX %+ _CRYPTO_is_VAES_capable +%xdefine CRYPTO_is_VPCLMULQDQ_capable BORINGSSL_PREFIX %+ _CRYPTO_is_VPCLMULQDQ_capable %xdefine CRYPTO_is_confidential_build BORINGSSL_PREFIX %+ _CRYPTO_is_confidential_build %xdefine CRYPTO_is_intel_cpu BORINGSSL_PREFIX %+ _CRYPTO_is_intel_cpu %xdefine CRYPTO_is_x86_SHA_capable BORINGSSL_PREFIX %+ _CRYPTO_is_x86_SHA_capable @@ -4143,16 +4136,6 @@ %xdefine DH_size BORINGSSL_PREFIX %+ _DH_size %xdefine DH_up_ref BORINGSSL_PREFIX %+ _DH_up_ref %xdefine DHparams_dup BORINGSSL_PREFIX %+ _DHparams_dup -%xdefine DILITHIUM_generate_key BORINGSSL_PREFIX %+ _DILITHIUM_generate_key -%xdefine DILITHIUM_generate_key_external_entropy BORINGSSL_PREFIX %+ _DILITHIUM_generate_key_external_entropy -%xdefine DILITHIUM_marshal_private_key BORINGSSL_PREFIX %+ _DILITHIUM_marshal_private_key -%xdefine DILITHIUM_marshal_public_key BORINGSSL_PREFIX %+ _DILITHIUM_marshal_public_key -%xdefine DILITHIUM_parse_private_key BORINGSSL_PREFIX %+ _DILITHIUM_parse_private_key -%xdefine DILITHIUM_parse_public_key BORINGSSL_PREFIX %+ _DILITHIUM_parse_public_key -%xdefine DILITHIUM_public_from_private BORINGSSL_PREFIX %+ _DILITHIUM_public_from_private -%xdefine DILITHIUM_sign BORINGSSL_PREFIX %+ _DILITHIUM_sign -%xdefine DILITHIUM_sign_deterministic BORINGSSL_PREFIX %+ _DILITHIUM_sign_deterministic -%xdefine DILITHIUM_verify BORINGSSL_PREFIX %+ _DILITHIUM_verify %xdefine DIRECTORYSTRING_free BORINGSSL_PREFIX %+ 
_DIRECTORYSTRING_free %xdefine DIRECTORYSTRING_it BORINGSSL_PREFIX %+ _DIRECTORYSTRING_it %xdefine DIRECTORYSTRING_new BORINGSSL_PREFIX %+ _DIRECTORYSTRING_new @@ -4499,6 +4482,7 @@ %xdefine EVP_MD_CTX_create BORINGSSL_PREFIX %+ _EVP_MD_CTX_create %xdefine EVP_MD_CTX_destroy BORINGSSL_PREFIX %+ _EVP_MD_CTX_destroy %xdefine EVP_MD_CTX_free BORINGSSL_PREFIX %+ _EVP_MD_CTX_free +%xdefine EVP_MD_CTX_get0_md BORINGSSL_PREFIX %+ _EVP_MD_CTX_get0_md %xdefine EVP_MD_CTX_init BORINGSSL_PREFIX %+ _EVP_MD_CTX_init %xdefine EVP_MD_CTX_md BORINGSSL_PREFIX %+ _EVP_MD_CTX_md %xdefine EVP_MD_CTX_move BORINGSSL_PREFIX %+ _EVP_MD_CTX_move @@ -4780,17 +4764,13 @@ %xdefine METHOD_ref BORINGSSL_PREFIX %+ _METHOD_ref %xdefine METHOD_unref BORINGSSL_PREFIX %+ _METHOD_unref %xdefine MLDSA65_generate_key BORINGSSL_PREFIX %+ _MLDSA65_generate_key -%xdefine MLDSA65_generate_key_external_entropy BORINGSSL_PREFIX %+ _MLDSA65_generate_key_external_entropy -%xdefine MLDSA65_marshal_private_key BORINGSSL_PREFIX %+ _MLDSA65_marshal_private_key %xdefine MLDSA65_marshal_public_key BORINGSSL_PREFIX %+ _MLDSA65_marshal_public_key %xdefine MLDSA65_parse_private_key BORINGSSL_PREFIX %+ _MLDSA65_parse_private_key %xdefine MLDSA65_parse_public_key BORINGSSL_PREFIX %+ _MLDSA65_parse_public_key %xdefine MLDSA65_private_key_from_seed BORINGSSL_PREFIX %+ _MLDSA65_private_key_from_seed %xdefine MLDSA65_public_from_private BORINGSSL_PREFIX %+ _MLDSA65_public_from_private %xdefine MLDSA65_sign BORINGSSL_PREFIX %+ _MLDSA65_sign -%xdefine MLDSA65_sign_internal BORINGSSL_PREFIX %+ _MLDSA65_sign_internal %xdefine MLDSA65_verify BORINGSSL_PREFIX %+ _MLDSA65_verify -%xdefine MLDSA65_verify_internal BORINGSSL_PREFIX %+ _MLDSA65_verify_internal %xdefine MLKEM1024_decap BORINGSSL_PREFIX %+ _MLKEM1024_decap %xdefine MLKEM1024_encap BORINGSSL_PREFIX %+ _MLKEM1024_encap %xdefine MLKEM1024_encap_external_entropy BORINGSSL_PREFIX %+ _MLKEM1024_encap_external_entropy @@ -4838,8 +4818,6 @@ %xdefine NOTICEREF_free 
BORINGSSL_PREFIX %+ _NOTICEREF_free %xdefine NOTICEREF_it BORINGSSL_PREFIX %+ _NOTICEREF_it %xdefine NOTICEREF_new BORINGSSL_PREFIX %+ _NOTICEREF_new -%xdefine OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER BORINGSSL_PREFIX %+ _OBJC_CLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER -%xdefine OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER BORINGSSL_PREFIX %+ _OBJC_METACLASS_$_swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLER_FINDER %xdefine OBJ_cbs2nid BORINGSSL_PREFIX %+ _OBJ_cbs2nid %xdefine OBJ_cleanup BORINGSSL_PREFIX %+ _OBJ_cleanup %xdefine OBJ_cmp BORINGSSL_PREFIX %+ _OBJ_cmp @@ -5219,10 +5197,6 @@ %xdefine SPAKE2_CTX_new BORINGSSL_PREFIX %+ _SPAKE2_CTX_new %xdefine SPAKE2_generate_msg BORINGSSL_PREFIX %+ _SPAKE2_generate_msg %xdefine SPAKE2_process_msg BORINGSSL_PREFIX %+ _SPAKE2_process_msg -%xdefine SPX_generate_key BORINGSSL_PREFIX %+ _SPX_generate_key -%xdefine SPX_generate_key_from_seed BORINGSSL_PREFIX %+ _SPX_generate_key_from_seed -%xdefine SPX_sign BORINGSSL_PREFIX %+ _SPX_sign -%xdefine SPX_verify BORINGSSL_PREFIX %+ _SPX_verify %xdefine SSLeay BORINGSSL_PREFIX %+ _SSLeay %xdefine SSLeay_version BORINGSSL_PREFIX %+ _SSLeay_version %xdefine TRUST_TOKEN_CLIENT_add_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_add_key @@ -5725,7 +5699,11 @@ %xdefine aes256gcmsiv_kdf BORINGSSL_PREFIX %+ _aes256gcmsiv_kdf %xdefine aes_ctr_set_key BORINGSSL_PREFIX %+ _aes_ctr_set_key %xdefine aes_gcm_dec_kernel BORINGSSL_PREFIX %+ _aes_gcm_dec_kernel +%xdefine aes_gcm_dec_update_vaes_avx10_256 BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx10_256 +%xdefine aes_gcm_dec_update_vaes_avx10_512 BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx10_512 %xdefine aes_gcm_enc_kernel BORINGSSL_PREFIX %+ _aes_gcm_enc_kernel +%xdefine aes_gcm_enc_update_vaes_avx10_256 BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx10_256 +%xdefine aes_gcm_enc_update_vaes_avx10_512 BORINGSSL_PREFIX %+ 
_aes_gcm_enc_update_vaes_avx10_512 %xdefine aes_hw_cbc_encrypt BORINGSSL_PREFIX %+ _aes_hw_cbc_encrypt %xdefine aes_hw_ctr32_encrypt_blocks BORINGSSL_PREFIX %+ _aes_hw_ctr32_encrypt_blocks %xdefine aes_hw_decrypt BORINGSSL_PREFIX %+ _aes_hw_decrypt @@ -5768,13 +5746,10 @@ %xdefine asn1_type_set0_string BORINGSSL_PREFIX %+ _asn1_type_set0_string %xdefine asn1_type_value_as_pointer BORINGSSL_PREFIX %+ _asn1_type_value_as_pointer %xdefine asn1_utctime_to_tm BORINGSSL_PREFIX %+ _asn1_utctime_to_tm +%xdefine bcm_as_approved_status BORINGSSL_PREFIX %+ _bcm_as_approved_status +%xdefine bcm_success BORINGSSL_PREFIX %+ _bcm_success %xdefine beeu_mod_inverse_vartime BORINGSSL_PREFIX %+ _beeu_mod_inverse_vartime -%xdefine bio_clear_socket_error BORINGSSL_PREFIX %+ _bio_clear_socket_error %xdefine bio_errno_should_retry BORINGSSL_PREFIX %+ _bio_errno_should_retry -%xdefine bio_ip_and_port_to_socket_and_addr BORINGSSL_PREFIX %+ _bio_ip_and_port_to_socket_and_addr -%xdefine bio_sock_error BORINGSSL_PREFIX %+ _bio_sock_error -%xdefine bio_socket_nbio BORINGSSL_PREFIX %+ _bio_socket_nbio -%xdefine bio_socket_should_retry BORINGSSL_PREFIX %+ _bio_socket_should_retry %xdefine bn_abs_sub_consttime BORINGSSL_PREFIX %+ _bn_abs_sub_consttime %xdefine bn_add_words BORINGSSL_PREFIX %+ _bn_add_words %xdefine bn_assert_fits_in_bytes BORINGSSL_PREFIX %+ _bn_assert_fits_in_bytes @@ -6137,18 +6112,22 @@ %xdefine gcm_ghash_nohw BORINGSSL_PREFIX %+ _gcm_ghash_nohw %xdefine gcm_ghash_ssse3 BORINGSSL_PREFIX %+ _gcm_ghash_ssse3 %xdefine gcm_ghash_v8 BORINGSSL_PREFIX %+ _gcm_ghash_v8 +%xdefine gcm_ghash_vpclmulqdq_avx10_256 BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx10_256 +%xdefine gcm_ghash_vpclmulqdq_avx10_512 BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx10_512 %xdefine gcm_gmult_avx BORINGSSL_PREFIX %+ _gcm_gmult_avx %xdefine gcm_gmult_clmul BORINGSSL_PREFIX %+ _gcm_gmult_clmul %xdefine gcm_gmult_neon BORINGSSL_PREFIX %+ _gcm_gmult_neon %xdefine gcm_gmult_nohw BORINGSSL_PREFIX %+ 
_gcm_gmult_nohw %xdefine gcm_gmult_ssse3 BORINGSSL_PREFIX %+ _gcm_gmult_ssse3 %xdefine gcm_gmult_v8 BORINGSSL_PREFIX %+ _gcm_gmult_v8 +%xdefine gcm_gmult_vpclmulqdq_avx10 BORINGSSL_PREFIX %+ _gcm_gmult_vpclmulqdq_avx10 %xdefine gcm_init_avx BORINGSSL_PREFIX %+ _gcm_init_avx %xdefine gcm_init_clmul BORINGSSL_PREFIX %+ _gcm_init_clmul %xdefine gcm_init_neon BORINGSSL_PREFIX %+ _gcm_init_neon %xdefine gcm_init_nohw BORINGSSL_PREFIX %+ _gcm_init_nohw %xdefine gcm_init_ssse3 BORINGSSL_PREFIX %+ _gcm_init_ssse3 %xdefine gcm_init_v8 BORINGSSL_PREFIX %+ _gcm_init_v8 +%xdefine gcm_init_vpclmulqdq_avx10 BORINGSSL_PREFIX %+ _gcm_init_vpclmulqdq_avx10 %xdefine gcm_neon_capable BORINGSSL_PREFIX %+ _gcm_neon_capable %xdefine gcm_pmull_capable BORINGSSL_PREFIX %+ _gcm_pmull_capable %xdefine have_fast_rdrand BORINGSSL_PREFIX %+ _have_fast_rdrand @@ -6630,39 +6609,6 @@ %xdefine slhdsa_wots_sign BORINGSSL_PREFIX %+ _slhdsa_wots_sign %xdefine slhdsa_xmss_pk_from_sig BORINGSSL_PREFIX %+ _slhdsa_xmss_pk_from_sig %xdefine slhdsa_xmss_sign BORINGSSL_PREFIX %+ _slhdsa_xmss_sign -%xdefine spx_base_b BORINGSSL_PREFIX %+ _spx_base_b -%xdefine spx_copy_keypair_addr BORINGSSL_PREFIX %+ _spx_copy_keypair_addr -%xdefine spx_fors_pk_from_sig BORINGSSL_PREFIX %+ _spx_fors_pk_from_sig -%xdefine spx_fors_sign BORINGSSL_PREFIX %+ _spx_fors_sign -%xdefine spx_fors_sk_gen BORINGSSL_PREFIX %+ _spx_fors_sk_gen -%xdefine spx_fors_treehash BORINGSSL_PREFIX %+ _spx_fors_treehash -%xdefine spx_get_tree_index BORINGSSL_PREFIX %+ _spx_get_tree_index -%xdefine spx_ht_sign BORINGSSL_PREFIX %+ _spx_ht_sign -%xdefine spx_ht_verify BORINGSSL_PREFIX %+ _spx_ht_verify -%xdefine spx_set_chain_addr BORINGSSL_PREFIX %+ _spx_set_chain_addr -%xdefine spx_set_hash_addr BORINGSSL_PREFIX %+ _spx_set_hash_addr -%xdefine spx_set_keypair_addr BORINGSSL_PREFIX %+ _spx_set_keypair_addr -%xdefine spx_set_layer_addr BORINGSSL_PREFIX %+ _spx_set_layer_addr -%xdefine spx_set_tree_addr BORINGSSL_PREFIX %+ _spx_set_tree_addr -%xdefine 
spx_set_tree_height BORINGSSL_PREFIX %+ _spx_set_tree_height -%xdefine spx_set_tree_index BORINGSSL_PREFIX %+ _spx_set_tree_index -%xdefine spx_set_type BORINGSSL_PREFIX %+ _spx_set_type -%xdefine spx_thash_f BORINGSSL_PREFIX %+ _spx_thash_f -%xdefine spx_thash_h BORINGSSL_PREFIX %+ _spx_thash_h -%xdefine spx_thash_hmsg BORINGSSL_PREFIX %+ _spx_thash_hmsg -%xdefine spx_thash_prf BORINGSSL_PREFIX %+ _spx_thash_prf -%xdefine spx_thash_prfmsg BORINGSSL_PREFIX %+ _spx_thash_prfmsg -%xdefine spx_thash_tk BORINGSSL_PREFIX %+ _spx_thash_tk -%xdefine spx_thash_tl BORINGSSL_PREFIX %+ _spx_thash_tl -%xdefine spx_to_uint64 BORINGSSL_PREFIX %+ _spx_to_uint64 -%xdefine spx_treehash BORINGSSL_PREFIX %+ _spx_treehash -%xdefine spx_uint64_to_len_bytes BORINGSSL_PREFIX %+ _spx_uint64_to_len_bytes -%xdefine spx_wots_pk_from_sig BORINGSSL_PREFIX %+ _spx_wots_pk_from_sig -%xdefine spx_wots_pk_gen BORINGSSL_PREFIX %+ _spx_wots_pk_gen -%xdefine spx_wots_sign BORINGSSL_PREFIX %+ _spx_wots_sign -%xdefine spx_xmss_pk_from_sig BORINGSSL_PREFIX %+ _spx_xmss_pk_from_sig -%xdefine spx_xmss_sign BORINGSSL_PREFIX %+ _spx_xmss_sign -%xdefine swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE BORINGSSL_PREFIX %+ _swift_crypto_CCryptoBoringSSL_SWIFTPM_MODULE_BUNDLE %xdefine v2i_GENERAL_NAME BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME %xdefine v2i_GENERAL_NAMES BORINGSSL_PREFIX %+ _v2i_GENERAL_NAMES %xdefine v2i_GENERAL_NAME_ex BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME_ex diff --git a/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_dilithium.h b/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_dilithium.h deleted file mode 100644 index 5abd7387..00000000 --- a/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_dilithium.h +++ /dev/null @@ -1,129 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and 
this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_DILITHIUM_H -#define OPENSSL_HEADER_DILITHIUM_H - -#include "CCryptoBoringSSL_base.h" - -#if defined(__cplusplus) -extern "C" { -#endif - - -#if defined(OPENSSL_UNSTABLE_EXPERIMENTAL_DILITHIUM) -// The ML-DSA spec has now been standardized and ML-DSA is available in -// BoringSSL. This code should no longer be used. It was intended for -// short-lived experiments and must not have been deployed anywhere durable. If -// you were using this you need to use the instead. This -// header and code will be removed from BoringSSL soon. - -// Dilithium3. - -// DILITHIUM_private_key contains a Dilithium3 private key. The contents of this -// object should never leave the address space since the format is unstable. -struct DILITHIUM_private_key { - union { - uint8_t bytes[32 + 32 + 64 + 256 * 4 * (5 + 6 + 6)]; - uint32_t alignment; - } opaque; -}; - -// DILITHIUM_public_key contains a Dilithium3 public key. The contents of this -// object should never leave the address space since the format is unstable. -struct DILITHIUM_public_key { - union { - uint8_t bytes[32 + 64 + 256 * 4 * 6]; - uint32_t alignment; - } opaque; -}; - -// DILITHIUM_PRIVATE_KEY_BYTES is the number of bytes in an encoded Dilithium3 -// private key. -#define DILITHIUM_PRIVATE_KEY_BYTES 4032 - -// DILITHIUM_PUBLIC_KEY_BYTES is the number of bytes in an encoded Dilithium3 -// public key. 
-#define DILITHIUM_PUBLIC_KEY_BYTES 1952 - -// DILITHIUM_SIGNATURE_BYTES is the number of bytes in an encoded Dilithium3 -// signature. -#define DILITHIUM_SIGNATURE_BYTES 3309 - -// DILITHIUM_generate_key generates a random public/private key pair, writes the -// encoded public key to |out_encoded_public_key| and sets |out_private_key| to -// the private key. Returns 1 on success and 0 on failure. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_generate_key( - uint8_t out_encoded_public_key[DILITHIUM_PUBLIC_KEY_BYTES], - struct DILITHIUM_private_key *out_private_key); - -// DILITHIUM_public_from_private sets |*out_public_key| to the public key that -// corresponds to |private_key|. Returns 1 on success and 0 on failure. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_public_from_private( - struct DILITHIUM_public_key *out_public_key, - const struct DILITHIUM_private_key *private_key); - -// DILITHIUM_sign generates a signature for the message |msg| of length -// |msg_len| using |private_key| following the randomized algorithm, and writes -// the encoded signature to |out_encoded_signature|. Returns 1 on success and 0 -// on failure. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_sign( - uint8_t out_encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const struct DILITHIUM_private_key *private_key, const uint8_t *msg, - size_t msg_len); - -// DILITHIUM_verify verifies that |encoded_signature| constitutes a valid -// signature for the message |msg| of length |msg_len| using |public_key|. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_verify( - const struct DILITHIUM_public_key *public_key, - const uint8_t encoded_signature[DILITHIUM_SIGNATURE_BYTES], - const uint8_t *msg, size_t msg_len); - - -// Serialisation of keys. - -// DILITHIUM_marshal_public_key serializes |public_key| to |out| in the standard -// format for Dilithium public keys. It returns one on success or zero on -// allocation error. 
-OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_marshal_public_key( - CBB *out, const struct DILITHIUM_public_key *public_key); - -// DILITHIUM_parse_public_key parses a public key, in the format generated by -// |DILITHIUM_marshal_public_key|, from |in| and writes the result to -// |out_public_key|. It returns one on success or zero on parse error or if -// there are trailing bytes in |in|. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_parse_public_key( - struct DILITHIUM_public_key *public_key, CBS *in); - -// DILITHIUM_marshal_private_key serializes |private_key| to |out| in the -// standard format for Dilithium private keys. It returns one on success or zero -// on allocation error. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_marshal_private_key( - CBB *out, const struct DILITHIUM_private_key *private_key); - -// DILITHIUM_parse_private_key parses a private key, in the format generated by -// |DILITHIUM_marshal_private_key|, from |in| and writes the result to -// |out_private_key|. It returns one on success or zero on parse error or if -// there are trailing bytes in |in|. -OPENSSL_EXPORT OPENSSL_DEPRECATED int DILITHIUM_parse_private_key( - struct DILITHIUM_private_key *private_key, CBS *in); - -#endif // OPENSSL_UNSTABLE_EXPERIMENTAL_DILITHIUM - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_DILITHIUM_H diff --git a/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_kyber.h b/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_kyber.h index b9c969f9..a2ac623b 100644 --- a/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_kyber.h +++ b/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_kyber.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, Google Inc. 
+/* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_spx.h b/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_spx.h deleted file mode 100644 index 798831c2..00000000 --- a/Sources/CCryptoBoringSSL/include/experimental/CCryptoBoringSSL_spx.h +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright (c) 2023, Google LLC - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_SPX_H -#define OPENSSL_HEADER_SPX_H - -#include "CCryptoBoringSSL_base.h" - -#if defined(__cplusplus) -extern "C" { -#endif - - -#if defined(OPENSSL_UNSTABLE_EXPERIMENTAL_SPX) -// This header implements experimental, draft versions of not-yet-standardized -// primitives. When the standard is complete, these functions will be removed -// and replaced with the final, incompatible standard version. They are -// available now for short-lived experiments, but must not be deployed anywhere -// durable, such as a long-lived key store. 
To use these functions define -// OPENSSL_UNSTABLE_EXPERIMENTAL_SPX - -// SPX_N is the number of bytes in the hash output -#define SPX_N 16 - -// SPX_PUBLIC_KEY_BYTES is the nNumber of bytes in the public key of -// SPHINCS+-SHA2-128s -#define SPX_PUBLIC_KEY_BYTES 32 - -// SPX_SECRET_KEY_BYTES is the number of bytes in the private key of -// SPHINCS+-SHA2-128s -#define SPX_SECRET_KEY_BYTES 64 - -// SPX_SIGNATURE_BYTES is the number of bytes in a signature of -// SPHINCS+-SHA2-128s -#define SPX_SIGNATURE_BYTES 7856 - -// SPX_generate_key generates a SPHINCS+-SHA2-128s key pair and writes the -// result to |out_public_key| and |out_secret_key|. -// Private key: SK.seed || SK.prf || PK.seed || PK.root -// Public key: PK.seed || PK.root -OPENSSL_EXPORT void SPX_generate_key( - uint8_t out_public_key[SPX_PUBLIC_KEY_BYTES], - uint8_t out_secret_key[SPX_SECRET_KEY_BYTES]); - -// SPX_generate_key_from_seed generates a SPHINCS+-SHA2-128s key pair from a -// 48-byte seed and writes the result to |out_public_key| and |out_secret_key|. -// Secret key: SK.seed || SK.prf || PK.seed || PK.root -// Public key: PK.seed || PK.root -OPENSSL_EXPORT void SPX_generate_key_from_seed( - uint8_t out_public_key[SPX_PUBLIC_KEY_BYTES], - uint8_t out_secret_key[SPX_SECRET_KEY_BYTES], - const uint8_t seed[3 * SPX_N]); - -// SPX_sign generates a SPHINCS+-SHA2-128s signature over |msg| or length -// |msg_len| using |secret_key| and writes the output to |out_signature|. -// -// if |randomized| is 0, deterministic signing is performed, otherwise, -// non-deterministic signing is performed. -OPENSSL_EXPORT void SPX_sign( - uint8_t out_snignature[SPX_SIGNATURE_BYTES], - const uint8_t secret_key[SPX_SECRET_KEY_BYTES], const uint8_t *msg, - size_t msg_len, int randomized); - -// SPX_verify verifies a SPHINCS+-SHA2-128s signature in |signature| over |msg| -// or length |msg_len| using |public_key|. 1 is returned if the signature -// matches, 0 otherwise. 
-OPENSSL_EXPORT int SPX_verify( - const uint8_t signature[SPX_SIGNATURE_BYTES], - const uint8_t public_key[SPX_SECRET_KEY_BYTES], const uint8_t *msg, - size_t msg_len); - -#endif //OPENSSL_UNSTABLE_EXPERIMENTAL_SPX - - -#if defined(__cplusplus) -} // extern C -#endif - -#endif // OPENSSL_HEADER_SPX_H