From bba8a5958b48fb33334b3b93db0335448ae57683 Mon Sep 17 00:00:00 2001 From: "A. Googler" Date: Tue, 8 Nov 2022 22:33:19 -0800 Subject: [PATCH] Project import generated by Copybara. GitOrigin-RevId: 1fdbf4fd0c5b59c42ce4f8b0756a07f44938bea1 --- .dockerignore | 12 ++ WORKSPACE | 8 +- .../parser => common}/javacc_parser.bzl | 0 backend/database/BUILD | 5 +- backend/database/database.cc | 26 ++- backend/database/database.h | 20 +- backend/database/database_test.cc | 82 +++++---- backend/query/BUILD | 10 +- backend/query/catalog.cc | 10 + backend/query/catalog.h | 5 + backend/query/function_catalog.cc | 4 +- backend/query/query_engine.cc | 34 ++-- backend/query/query_validator.cc | 22 ++- backend/query/query_validator_test.cc | 72 ++++++++ backend/query/queryable_column.h | 24 +++ backend/query/queryable_table.cc | 32 ++++ backend/query/queryable_table.h | 5 + backend/schema/backfills/BUILD | 6 +- .../backfills/column_value_backfill_test.cc | 16 +- .../schema/backfills/index_backfill_test.cc | 19 +- backend/schema/ddl/operations.proto | 4 + backend/schema/parser/BUILD | 2 +- backend/schema/parser/ddl_parser.jjt | 24 ++- backend/schema/parser/ddl_parser_test.cc | 134 +++++++------- backend/schema/parser/ddl_reserved_words.cc | 2 + backend/schema/updater/BUILD | 4 - backend/schema/updater/schema_updater.cc | 34 ++-- backend/schema/updater/schema_updater.h | 32 ++-- .../updater/schema_updater_tests/base.cc | 4 +- backend/schema/verifiers/BUILD | 7 +- .../check_constraint_verifiers_test.cc | 12 +- .../verifiers/column_value_verifiers_test.cc | 22 +-- .../verifiers/foreign_key_verifiers_test.cc | 7 +- build/bazel/zetasql.patch | 10 - build/docker/Dockerfile.ubuntu | 14 +- .../{continuous.cfg => continuous_cpp.cfg} | 2 +- build/kokoro/gcp_ubuntu/continuous_csharp.cfg | 35 ++++ build/kokoro/gcp_ubuntu/continuous_go.cfg | 35 ++++ build/kokoro/gcp_ubuntu/continuous_java.cfg | 35 ++++ build/kokoro/gcp_ubuntu/continuous_nodejs.cfg | 35 ++++ build/kokoro/gcp_ubuntu/continuous_php.cfg | 35 ++++ build/kokoro/gcp_ubuntu/continuous_py.cfg | 35 ++++ build/kokoro/gcp_ubuntu/continuous_ruby.cfg | 35 ++++ build/kokoro/gcp_ubuntu/docker_test.sh | 2 +- common/BUILD | 3 + common/errors.cc | 18 +- common/errors.h | 7 +- frontend/collections/BUILD | 1 + frontend/collections/database_manager.cc | 4 +- frontend/collections/database_manager.h | 3 +- frontend/collections/database_manager_test.cc | 45 ++--- frontend/collections/instance_manager.cc | 21 ++- frontend/collections/instance_manager_test.cc | 172 +++++++++++++++--- frontend/entities/instance.cc | 1 + frontend/entities/instance.h | 10 +- frontend/entities/instance_test.cc | 2 +- frontend/handlers/BUILD | 3 +- frontend/handlers/databases.cc | 15 +- frontend/server/BUILD | 2 - frontend/server/request_context_test.cc | 14 +- tests/common/proto_matchers.cc | 2 +- tests/common/schema_constructor.h | 3 +- .../cases/column_default_value_read_write.cc | 71 ++++++++ tests/conformance/cases/information_schema.cc | 1 + tests/conformance/cases/snapshot_reads.cc | 86 +++++---- tests/gcloud/instance_admin_test.py | 9 +- 66 files changed, 1097 insertions(+), 369 deletions(-) create mode 100644 .dockerignore rename backend/{schema/parser => common}/javacc_parser.bzl (100%) rename build/kokoro/gcp_ubuntu/{continuous.cfg => continuous_cpp.cfg} (95%) create mode 100644 build/kokoro/gcp_ubuntu/continuous_csharp.cfg create mode 100644 build/kokoro/gcp_ubuntu/continuous_go.cfg create mode 100644 build/kokoro/gcp_ubuntu/continuous_java.cfg create mode 100644 
build/kokoro/gcp_ubuntu/continuous_nodejs.cfg create mode 100644 build/kokoro/gcp_ubuntu/continuous_php.cfg create mode 100644 build/kokoro/gcp_ubuntu/continuous_py.cfg create mode 100644 build/kokoro/gcp_ubuntu/continuous_ruby.cfg diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..8a7b9206 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,12 @@ +bazel-* +.git +.gitignore +*.md +LICENSE +.dockerignore +build/docker/ +build/gcb/ +build/kokoro/ +copybara +METADATA +OWNERS* diff --git a/WORKSPACE b/WORKSPACE index 71659eda..3cbf3c18 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -100,7 +100,7 @@ http_archive( load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") -go_register_toolchains(version = "1.19.1") +go_register_toolchains(version = "1.19.2") http_archive( name = "bazel_gazelle", @@ -278,12 +278,12 @@ http_archive( http_archive( name = "com_google_zetasql", - url = "https://github.com/google/zetasql/archive/2022.08.1.tar.gz", - strip_prefix = "zetasql-2022.08.1", + url = "https://github.com/google/zetasql/archive/177d495a064e38684c462cf883e22428273bd996.tar.gz", + strip_prefix = "zetasql-177d495a064e38684c462cf883e22428273bd996", # Patches applied: # - Give visibility to ZetaSQL's base library to reuse some utilities patches = ["//build/bazel:zetasql.patch"], - sha256 = "4c9611fa2fc2bde0e7877ff36fa3ebc0400477a2fe86589025d49a06897e5296", + sha256 = "4092dce28d3fb5b0071d0268bcb3ba13e28eb4f981c5c267688b8c3590ca7705", ) http_archive( diff --git a/backend/schema/parser/javacc_parser.bzl b/backend/common/javacc_parser.bzl similarity index 100% rename from backend/schema/parser/javacc_parser.bzl rename to backend/common/javacc_parser.bzl diff --git a/backend/database/BUILD b/backend/database/BUILD index c5ca4202..e2242565 100644 --- a/backend/database/BUILD +++ b/backend/database/BUILD @@ -37,7 +37,6 @@ cc_library( "//backend/schema/updater:scoped_schema_change_lock", "//backend/storage", "//backend/storage:in_memory_storage", - "//backend/transaction:actions", "//backend/transaction:read_only_transaction", "//backend/transaction:read_write_transaction", "//common:clock", @@ -46,7 +45,6 @@ cc_library( "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", "@com_google_absl//absl/time", - "@com_google_absl//absl/types:span", "@com_google_absl//absl/types:variant", "@com_google_zetasql//zetasql/public:type", ], @@ -60,10 +58,9 @@ cc_test( deps = [ ":database", "//backend/access:read", - "//backend/common:ids", "//backend/datamodel:key_set", + "//backend/schema/updater:schema_updater", "//backend/transaction:read_only_transaction", - "//backend/transaction:read_write_transaction", "//common:clock", "//common:errors", "//tests/common:proto_matchers", diff --git a/backend/database/database.cc b/backend/database/database.cc index 08804694..a04147a9 100644 --- a/backend/database/database.cc +++ b/backend/database/database.cc @@ -23,22 +23,17 @@ #include "absl/memory/memory.h" #include "absl/status/statusor.h" -#include "absl/strings/match.h" -#include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" -#include "absl/time/clock.h" #include "absl/time/time.h" #include "absl/types/variant.h" #include "backend/actions/manager.h" #include "backend/common/ids.h" #include "backend/locking/manager.h" -#include "backend/locking/request.h" #include "backend/query/query_engine.h" #include "backend/schema/catalog/versioned_catalog.h" #include "backend/schema/updater/schema_updater.h" #include 
"backend/schema/updater/scoped_schema_change_lock.h" #include "backend/storage/in_memory_storage.h" -#include "backend/transaction/actions.h" #include "backend/transaction/options.h" #include "absl/status/status.h" #include "zetasql/base/status_macros.h" @@ -53,7 +48,7 @@ namespace backend { Database::Database() : transaction_id_generator_(1) {} absl::StatusOr> Database::Create( - Clock* clock, const std::vector& create_statements) { + Clock* clock, const SchemaChangeOperation& schema_change_operation) { auto database = absl::WrapUnique(new Database()); database->clock_ = clock; database->storage_ = std::make_unique(); @@ -63,13 +58,13 @@ absl::StatusOr> Database::Create( std::make_unique(database->type_factory_.get()); database->action_manager_ = std::make_unique(); - if (create_statements.empty()) { + if (schema_change_operation.statements.empty()) { database->versioned_catalog_ = std::make_unique(); } else { SchemaUpdater updater; ZETASQL_ASSIGN_OR_RETURN( std::unique_ptr schema, - updater.CreateSchemaFromDDL(create_statements, + updater.CreateSchemaFromDDL(schema_change_operation, database->GetSchemaChangeContext())); database->versioned_catalog_ = std::make_unique(std::move(schema)); @@ -107,11 +102,11 @@ SchemaChangeContext Database::GetSchemaChangeContext() { }; } -absl::Status Database::UpdateSchema(absl::Span statements, - int* num_succesful_statements, - absl::Time* commit_timestamp, - absl::Status* backfill_status) { - if (statements.empty()) { +absl::Status Database::UpdateSchema( + const SchemaChangeOperation& schema_change_operation, + int* num_succesful_statements, absl::Time* commit_timestamp, + absl::Status* backfill_status) { + if (schema_change_operation.statements.empty()) { return error::UpdateDatabaseMissingStatements(); } @@ -130,8 +125,9 @@ absl::Status Database::UpdateSchema(absl::Span statements, context.schema_change_timestamp = update_timestamp; const Schema* existing_schema = versioned_catalog_->GetLatestSchema(); SchemaUpdater updater; - ZETASQL_ASSIGN_OR_RETURN(auto result, updater.UpdateSchemaFromDDL( - existing_schema, statements, context)); + ZETASQL_ASSIGN_OR_RETURN(auto result, + updater.UpdateSchemaFromDDL( + existing_schema, schema_change_operation, context)); *commit_timestamp = update_timestamp; *num_succesful_statements = result.num_successful_statements; *backfill_status = result.backfill_status; diff --git a/backend/database/database.h b/backend/database/database.h index 70057b7a..aed0435d 100644 --- a/backend/database/database.h +++ b/backend/database/database.h @@ -26,7 +26,6 @@ #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" -#include "absl/types/span.h" #include "absl/types/variant.h" #include "backend/actions/manager.h" #include "backend/common/ids.h" @@ -57,7 +56,7 @@ class Database { // create_statements. Returns an error if create_statements are invalid, or if // failed to create the database. static absl::StatusOr> Create( - Clock* clock, const std::vector& create_statements); + Clock* clock, const SchemaChangeOperation& schema_change_operation); // Creates a read only transaction attached to this database. absl::StatusOr> @@ -75,15 +74,16 @@ class Database { // incoming schema change requests will be rejected with a FAILED_PRECONDITION // error. // - // DDL statements in `statements` are applied one-by-one until they either all - // succeed or the first failure is encoutered. 
+ // DDL statements in `schema_change_operation.statements` are applied + // one-by-one until they either all succeed or the first failure is + // encoutered. // // On return `num_successful_statements` will contain the number of // successfully applied DDL statements and `commit_timestamp` will contain the // timestamp at which they were applied. // - // If all the statements in `statements` are applied succesfully, both - // `backfill_status` and the returned status will be set to + // If all the statements in `schema_change_operation.statements` are applied + // succesfully, both `backfill_status` and the returned status will be set to // absl::OkStatus(). // // If all the statements are semantically valid then the return status will @@ -96,10 +96,10 @@ class Database { // encountered while processing the backfill/verification actions for the // statements, then the first such error will be returned in // `backfill_status`. - absl::Status UpdateSchema(absl::Span statements, - int* num_succesful_statements, - absl::Time* commit_timestamp, - absl::Status* backfill_status); + absl::Status UpdateSchema( + const SchemaChangeOperation& schema_change_operation, + int* num_succesful_statements, absl::Time* commit_timestamp, + absl::Status* backfill_status); // Retrives the current version of the schema. const Schema* GetLatestSchema() const; diff --git a/backend/database/database_test.cc b/backend/database/database_test.cc index cf88a5d3..7e0194cd 100644 --- a/backend/database/database_test.cc +++ b/backend/database/database_test.cc @@ -27,8 +27,8 @@ #include "tests/common/proto_matchers.h" #include "absl/status/status.h" #include "backend/access/read.h" -#include "backend/common/ids.h" #include "backend/datamodel/key_set.h" +#include "backend/schema/updater/schema_updater.h" #include "backend/transaction/options.h" #include "common/clock.h" #include "common/errors.h" @@ -43,7 +43,7 @@ using zetasql::values::Int64; class DatabaseTest : public ::testing::Test { public: - DatabaseTest() {} + DatabaseTest() = default; ReadArg read_column(std::string table_name, std::string column_name) { ReadArg args; @@ -58,21 +58,24 @@ class DatabaseTest : public ::testing::Test { }; TEST_F(DatabaseTest, CreateSuccessful) { - ZETASQL_EXPECT_OK(Database::Create(&clock_, /*create_statements=*/{})); + ZETASQL_EXPECT_OK(Database::Create(&clock_, SchemaChangeOperation{})); - ZETASQL_EXPECT_OK(Database::Create(&clock_, {R"( + std::vector create_statements = {R"( CREATE TABLE T( k1 INT64, k2 INT64, ) PRIMARY KEY(k1) )", - R"( - CREATE INDEX I on T(k1))"})); + R"( + CREATE INDEX I on T(k1))"}; + + ZETASQL_EXPECT_OK(Database::Create( + &clock_, SchemaChangeOperation{.statements = create_statements})); } TEST_F(DatabaseTest, UpdateSchemaSuccessful) { ZETASQL_ASSERT_OK_AND_ASSIGN(auto db, - Database::Create(&clock_, /*create_statements=*/{})); + Database::Create(&clock_, SchemaChangeOperation{})); std::vector update_statements = {R"( CREATE TABLE T( @@ -87,18 +90,22 @@ TEST_F(DatabaseTest, UpdateSchemaSuccessful) { absl::Status backfill_status; int completed_statements; absl::Time commit_ts; - ZETASQL_EXPECT_OK(db->UpdateSchema(update_statements, &completed_statements, - &commit_ts, &backfill_status)); + ZETASQL_EXPECT_OK( + db->UpdateSchema(SchemaChangeOperation{.statements = update_statements}, + &completed_statements, &commit_ts, &backfill_status)); ZETASQL_EXPECT_OK(backfill_status); } TEST_F(DatabaseTest, UpdateSchemaPartialSuccess) { - ZETASQL_ASSERT_OK_AND_ASSIGN(auto db, Database::Create(&clock_, {R"( + std::vector 
create_statements = {R"( CREATE TABLE T( k1 INT64, k2 INT64, ) PRIMARY KEY(k1) - )"})); + )"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + auto db, Database::Create(&clock_, SchemaChangeOperation{ + .statements = create_statements})); ZETASQL_ASSERT_OK_AND_ASSIGN( std::unique_ptr txn, @@ -133,8 +140,9 @@ TEST_F(DatabaseTest, UpdateSchemaPartialSuccess) { absl::Time commit_ts; // The statements are semantically valid, indicated by an OK return status. - ZETASQL_EXPECT_OK(db->UpdateSchema(update_statements, &completed_statements, - &commit_ts, &backfill_status)); + ZETASQL_EXPECT_OK( + db->UpdateSchema(SchemaChangeOperation{.statements = update_statements}, + &completed_statements, &commit_ts, &backfill_status)); // But the backfill statements fail. EXPECT_EQ(backfill_status, @@ -145,13 +153,15 @@ TEST_F(DatabaseTest, UpdateSchemaPartialSuccess) { } TEST_F(DatabaseTest, ConcurrentSchemaChangeIsAborted) { - ZETASQL_ASSERT_OK_AND_ASSIGN(auto db, Database::Create(&clock_, { - R"( + std::vector create_statements = {R"( CREATE TABLE T( k1 INT64, k2 INT64, - ) PRIMARY KEY(k1))", - })); + ) PRIMARY KEY(k1) + )"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + auto db, Database::Create(&clock_, SchemaChangeOperation{ + .statements = create_statements})); // Initiate a Read inside a read-write transaction to acquire locks. std::unique_ptr row_cursor; @@ -160,40 +170,46 @@ TEST_F(DatabaseTest, ConcurrentSchemaChangeIsAborted) { db->CreateReadWriteTransaction(ReadWriteOptions(), RetryState())); ZETASQL_EXPECT_OK(txn->Read(read_column("T", "k1"), &row_cursor)); - absl::Status backfill_status; - int completed_statements; - absl::Time commit_ts; - EXPECT_EQ( - db->UpdateSchema({R"( + std::vector update_statements = {R"( CREATE TABLE T( k1 INT64, k2 INT64, ) PRIMARY KEY(k1) - )"}, + )"}; + absl::Status backfill_status; + int completed_statements; + absl::Time commit_ts; + EXPECT_EQ( + db->UpdateSchema(SchemaChangeOperation{.statements = update_statements}, &completed_statements, &commit_ts, &backfill_status), error::ConcurrentSchemaChangeOrReadWriteTxnInProgress()); } TEST_F(DatabaseTest, SchemaChangeLocksSuccesfullyReleased) { - ZETASQL_ASSERT_OK_AND_ASSIGN(auto db, Database::Create(&clock_, {R"( + std::vector create_statements = {R"( CREATE TABLE T( k1 INT64, k2 INT64, - ) PRIMARY KEY(k1))"})); + ) PRIMARY KEY(k1) + )"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + auto db, Database::Create(&clock_, SchemaChangeOperation{ + .statements = create_statements})); // Schema update will fail. - absl::Status backfill_status; - int completed_statements; - absl::Time commit_ts; - EXPECT_FALSE(db->UpdateSchema({R"( + std::vector update_statements = {R"( CREATE TABLE T( k1 INT64, k2 INT64, ) PRIMARY KEY(k1) - )"}, - &completed_statements, &commit_ts, - &backfill_status) - .ok()); + )"}; + absl::Status backfill_status; + int completed_statements; + absl::Time commit_ts; + EXPECT_FALSE( + db->UpdateSchema(SchemaChangeOperation{.statements = update_statements}, + &completed_statements, &commit_ts, &backfill_status) + .ok()); // Can still run transactions as locks would have been released. 
std::unique_ptr row_cursor; diff --git a/backend/query/BUILD b/backend/query/BUILD index 78dd62ec..a0f320c2 100644 --- a/backend/query/BUILD +++ b/backend/query/BUILD @@ -165,7 +165,10 @@ cc_test( "@com_google_absl//absl/status", "@com_google_googletest//:gtest_main", "@com_google_zetasql//zetasql/base/testing:status_matchers", + "@com_google_zetasql//zetasql/public:builtin_function", + "@com_google_zetasql//zetasql/public:type", "@com_google_zetasql//zetasql/resolved_ast", + "@com_google_zetasql//zetasql/resolved_ast:make_node_vector", ], ) @@ -228,6 +231,7 @@ cc_library( "@com_google_absl//absl/memory", "@com_google_absl//absl/status", "@com_google_absl//absl/strings", + "@com_google_zetasql//zetasql/public:analyzer_options", "@com_google_zetasql//zetasql/public:catalog", "@com_google_zetasql//zetasql/public:function", "@com_google_zetasql//zetasql/public:simple_catalog", @@ -286,6 +290,7 @@ cc_library( deps = [ "//backend/schema/catalog:schema", "@com_google_absl//absl/strings", + "@com_google_zetasql//zetasql/public:analyzer_output", "@com_google_zetasql//zetasql/public:catalog", "@com_google_zetasql//zetasql/public:type", ], @@ -307,6 +312,9 @@ cc_library( "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/types:span", + "@com_google_zetasql//zetasql/public:analyzer", + "@com_google_zetasql//zetasql/public:analyzer_options", + "@com_google_zetasql//zetasql/public:analyzer_output", "@com_google_zetasql//zetasql/public:catalog", "@com_google_zetasql//zetasql/public:evaluator_table_iterator", "@com_google_zetasql//zetasql/public:value", @@ -328,7 +336,7 @@ cc_library( "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", - "@com_google_absl//absl/strings", + "@com_google_zetasql//zetasql/base:ret_check", "@com_google_zetasql//zetasql/public:builtin_function", "@com_google_zetasql//zetasql/public:builtin_function_options", "@com_google_zetasql//zetasql/public:function", diff --git a/backend/query/catalog.cc b/backend/query/catalog.cc index 636434d3..8118cc53 100644 --- a/backend/query/catalog.cc +++ b/backend/query/catalog.cc @@ -78,6 +78,16 @@ Catalog::Catalog(const Schema* schema, const FunctionCatalog* function_catalog, } } +Catalog::Catalog(const Schema* schema, const FunctionCatalog* function_catalog, + RowReader* reader, const zetasql::AnalyzerOptions& options, + zetasql::TypeFactory* type_factory) + : schema_(schema), function_catalog_(function_catalog) { + for (const auto* table : schema->tables()) { + tables_[table->Name()] = std::make_unique( + table, reader, options, this, type_factory); + } +} + absl::Status Catalog::GetCatalog(const std::string& name, zetasql::Catalog** catalog, const FindOptions& options) { diff --git a/backend/query/catalog.h b/backend/query/catalog.h index 2f6c63af..a7dd686b 100644 --- a/backend/query/catalog.h +++ b/backend/query/catalog.h @@ -20,6 +20,7 @@ #include #include +#include "zetasql/public/analyzer_options.h" #include "zetasql/public/catalog.h" #include "zetasql/public/function.h" #include "zetasql/public/simple_catalog.h" @@ -50,6 +51,10 @@ class Catalog : public zetasql::EnumerableCatalog { Catalog(const Schema* schema, const FunctionCatalog* function_catalog) : Catalog(schema, function_catalog, /*reader=*/nullptr) {} + Catalog(const Schema* schema, const FunctionCatalog* function_catalog, + RowReader* reader, const zetasql::AnalyzerOptions& options, + zetasql::TypeFactory* type_factory); + std::string FullName() const final 
{ // The name of the root catalog is "". return ""; diff --git a/backend/query/function_catalog.cc b/backend/query/function_catalog.cc index 64eefedd..f8256a6e 100644 --- a/backend/query/function_catalog.cc +++ b/backend/query/function_catalog.cc @@ -29,10 +29,11 @@ #include "zetasql/public/type.h" #include "zetasql/public/types/type_factory.h" #include "absl/container/flat_hash_set.h" +#include "absl/status/status.h" #include "absl/status/statusor.h" -#include "absl/strings/ascii.h" #include "backend/query/analyzer_options.h" #include "common/constants.h" +#include "zetasql/base/ret_check.h" #include "absl/status/status.h" namespace google { @@ -68,7 +69,6 @@ std::unique_ptr PendingCommitTimestampFunction() { zetasql::types::TimestampType(), {}, nullptr}}, function_options); } - } // namespace FunctionCatalog::FunctionCatalog(zetasql::TypeFactory* type_factory) { diff --git a/backend/query/query_engine.cc b/backend/query/query_engine.cc index 8f43c9ca..8ab45279 100644 --- a/backend/query/query_engine.cc +++ b/backend/query/query_engine.cc @@ -341,7 +341,7 @@ absl::StatusOr PendingCommitTimestampColumnsInUpdate( absl::StatusOr> EvaluateResolvedInsert( const zetasql::ResolvedInsertStmt* insert_statement, - const zetasql::ParameterValueMap& parameters, + zetasql::Catalog* catalog, const zetasql::ParameterValueMap& parameters, zetasql::TypeFactory* type_factory) { ZETASQL_ASSIGN_OR_RETURN(auto pending_ts_columns, PendingCommitTimestampColumnsInInsert( @@ -352,7 +352,7 @@ absl::StatusOr> EvaluateResolvedInsert( insert_statement, CommonEvaluatorOptions(type_factory)); ZETASQL_ASSIGN_OR_RETURN(auto analyzer_options, MakeAnalyzerOptionsWithParameters(parameters)); - ZETASQL_RETURN_IF_ERROR(prepared_insert->Prepare(analyzer_options)); + ZETASQL_RETURN_IF_ERROR(prepared_insert->Prepare(analyzer_options, catalog)); auto status_or = prepared_insert->Execute(parameters); if (!status_or.ok()) { @@ -365,7 +365,7 @@ absl::StatusOr> EvaluateResolvedInsert( absl::StatusOr> EvaluateResolvedUpdate( const zetasql::ResolvedUpdateStmt* update_statement, - const zetasql::ParameterValueMap& parameters, + zetasql::Catalog* catalog, const zetasql::ParameterValueMap& parameters, zetasql::TypeFactory* type_factory) { ZETASQL_ASSIGN_OR_RETURN(auto pending_ts_columns, PendingCommitTimestampColumnsInUpdate( @@ -375,7 +375,7 @@ absl::StatusOr> EvaluateResolvedUpdate( update_statement, CommonEvaluatorOptions(type_factory)); ZETASQL_ASSIGN_OR_RETURN(auto analyzer_options, MakeAnalyzerOptionsWithParameters(parameters)); - ZETASQL_RETURN_IF_ERROR(prepared_update->Prepare(analyzer_options)); + ZETASQL_RETURN_IF_ERROR(prepared_update->Prepare(analyzer_options, catalog)); auto status_or = prepared_update->Execute(parameters); if (!status_or.ok()) { @@ -388,13 +388,13 @@ absl::StatusOr> EvaluateResolvedUpdate( absl::StatusOr> EvaluateResolvedDelete( const zetasql::ResolvedDeleteStmt* delete_statement, - const zetasql::ParameterValueMap& parameters, + zetasql::Catalog* catalog, const zetasql::ParameterValueMap& parameters, zetasql::TypeFactory* type_factory) { auto prepared_delete = std::make_unique( delete_statement, CommonEvaluatorOptions(type_factory)); ZETASQL_ASSIGN_OR_RETURN(auto analyzer_options, MakeAnalyzerOptionsWithParameters(parameters)); - ZETASQL_RETURN_IF_ERROR(prepared_delete->Prepare(analyzer_options)); + ZETASQL_RETURN_IF_ERROR(prepared_delete->Prepare(analyzer_options, catalog)); ZETASQL_ASSIGN_OR_RETURN(auto iterator, prepared_delete->Execute(parameters)); return BuildDelete(std::move(iterator)); @@ -404,20 
+404,20 @@ absl::StatusOr> EvaluateResolvedDelete( // resolved AST and returns a pair of mutation and count of modified rows. absl::StatusOr> EvaluateUpdate( const zetasql::ResolvedStatement* resolved_statement, - const zetasql::ParameterValueMap& parameters, + zetasql::Catalog* catalog, const zetasql::ParameterValueMap& parameters, zetasql::TypeFactory* type_factory) { switch (resolved_statement->node_kind()) { case zetasql::RESOLVED_INSERT_STMT: return EvaluateResolvedInsert( - resolved_statement->GetAs(), + resolved_statement->GetAs(), catalog, parameters, type_factory); case zetasql::RESOLVED_UPDATE_STMT: return EvaluateResolvedUpdate( - resolved_statement->GetAs(), + resolved_statement->GetAs(), catalog, parameters, type_factory); case zetasql::RESOLVED_DELETE_STMT: return EvaluateResolvedDelete( - resolved_statement->GetAs(), + resolved_statement->GetAs(), catalog, parameters, type_factory); default: ZETASQL_RET_CHECK_FAIL() << "Unsupported support node kind " @@ -607,7 +607,13 @@ absl::StatusOr QueryEngine::GetDmlTargetTable( absl::StatusOr QueryEngine::ExecuteSql( const Query& query, const QueryContext& context) const { absl::Time start_time = absl::Now(); - Catalog catalog{context.schema, &function_catalog_, context.reader}; + + ZETASQL_ASSIGN_OR_RETURN(auto analyzer_options, + MakeAnalyzerOptionsWithParameters(query.declared_params)); + + Catalog catalog{context.schema, &function_catalog_, context.reader, + analyzer_options, type_factory_}; + ZETASQL_ASSIGN_OR_RETURN(auto analyzer_output, Analyze(query.sql, query.declared_params, &catalog, type_factory_, /*prune_unused_columns=*/true)); @@ -634,9 +640,9 @@ absl::StatusOr QueryEngine::ExecuteSql( ExtractValidatedResolvedStatementAndOptions( analyzer_output.get(), context.schema)); - ZETASQL_ASSIGN_OR_RETURN( - const auto& mutation_and_count, - EvaluateUpdate(resolved_statement.get(), params, type_factory_)); + ZETASQL_ASSIGN_OR_RETURN(const auto& mutation_and_count, + EvaluateUpdate(resolved_statement.get(), &catalog, params, + type_factory_)); ZETASQL_RETURN_IF_ERROR(context.writer->Write(mutation_and_count.first)); result.modified_row_count = static_cast(mutation_and_count.second); } diff --git a/backend/query/query_validator.cc b/backend/query/query_validator.cc index bd4d2c0c..b19bb63a 100644 --- a/backend/query/query_validator.cc +++ b/backend/query/query_validator.cc @@ -70,6 +70,9 @@ constexpr absl::string_view kHintJoinBatch = "batch_mode"; constexpr absl::string_view kHashJoinBuildSide = "hash_join_build_side"; constexpr absl::string_view kHashJoinBuildSideLeft = "build_left"; constexpr absl::string_view kHashJoinBuildSideRight = "build_right"; +constexpr absl::string_view kHashJoinExecution = "hash_join_execution"; +constexpr absl::string_view kHashJoinExecutionOnePass = "one_pass"; +constexpr absl::string_view kHashJoinExecutionMultiPass = "multi_pass"; // Group by constexpr absl::string_view kHintGroupMethod = "group_method"; @@ -104,6 +107,8 @@ constexpr absl::string_view kHintDisableQueryPartitionabilityCheck = constexpr absl::string_view kHintDisableQueryNullFilteredIndexCheck = "disable_query_null_filtered_index_check"; +constexpr absl::string_view kHintDisableInline = "disable_inline"; + absl::Status CollectHintsForNode( const zetasql::ResolvedOption* hint, absl::flat_hash_map* node_hint_map) { @@ -168,7 +173,7 @@ absl::Status QueryValidator::CheckSpannerHintName( {kHintForceIndex, kHintTableScanGroupByScanOptimization}}, {zetasql::RESOLVED_JOIN_SCAN, {kHintJoinTypeDeprecated, kHintJoinMethod, kHashJoinBuildSide, 
- kHintJoinForceOrder}}, + kHintJoinForceOrder, kHashJoinExecution}}, {zetasql::RESOLVED_AGGREGATE_SCAN, {kHintGroupTypeDeprecated, kHintGroupMethod}}, {zetasql::RESOLVED_ARRAY_SCAN, @@ -178,12 +183,13 @@ absl::Status QueryValidator::CheckSpannerHintName( {kHintForceIndex, kHintJoinTypeDeprecated, kHintJoinMethod, kHashJoinBuildSide, kHintJoinForceOrder, kHintConstantFolding, kUseAdditionalParallelism, kHintEnableAdaptivePlans, - kHintLockScannedRange, kHintParameterSensitive}}, + kHintLockScannedRange, kHintParameterSensitive, kHashJoinExecution}}, {zetasql::RESOLVED_SUBQUERY_EXPR, {kHintJoinTypeDeprecated, kHintJoinMethod, kHashJoinBuildSide, - kHintJoinBatch, kHintJoinForceOrder}}, + kHintJoinBatch, kHintJoinForceOrder, kHashJoinExecution}}, {zetasql::RESOLVED_SET_OPERATION_SCAN, - {kHintJoinMethod, kHintJoinForceOrder}}}; + {kHintJoinMethod, kHintJoinForceOrder}}, + {zetasql::RESOLVED_FUNCTION_CALL, {kHintDisableInline}}}; const auto& iter = supported_hints->find(node_kind); if (iter == supported_hints->end() || !iter->second.contains(name)) { @@ -223,6 +229,7 @@ absl::Status QueryValidator::CheckHintValue( {kHintParameterSensitive, zetasql::types::StringType()}, {kHintJoinMethod, zetasql::types::StringType()}, {kHashJoinBuildSide, zetasql::types::StringType()}, + {kHashJoinExecution, zetasql::types::StringType()}, {kHintJoinBatch, zetasql::types::BoolType()}, {kHintJoinForceOrder, zetasql::types::BoolType()}, {kHintGroupTypeDeprecated, zetasql::types::StringType()}, @@ -233,6 +240,7 @@ absl::Status QueryValidator::CheckHintValue( {kHintConstantFolding, zetasql::types::BoolType()}, {kHintTableScanGroupByScanOptimization, zetasql::types::BoolType()}, {kHintEnableAdaptivePlans, zetasql::types::BoolType()}, + {kHintDisableInline, zetasql::types::BoolType()}, }}; const auto& iter = supported_hint_types->find(name); @@ -292,6 +300,12 @@ absl::Status QueryValidator::CheckHintValue( absl::EqualsIgnoreCase(string_value, kHashJoinBuildSideRight))) { return error::InvalidHintValue(name, value.DebugString()); } + } else if (absl::EqualsIgnoreCase(name, kHashJoinExecution)) { + const std::string& string_value = value.string_value(); + if (!(absl::EqualsIgnoreCase(string_value, kHashJoinExecutionOnePass) || + absl::EqualsIgnoreCase(string_value, kHashJoinExecutionMultiPass))) { + return error::InvalidHintValue(name, value.DebugString()); + } } else if (absl::EqualsIgnoreCase(name, kHintGroupMethod) || absl::EqualsIgnoreCase(name, kHintGroupTypeDeprecated)) { const std::string& string_value = value.string_value(); diff --git a/backend/query/query_validator_test.cc b/backend/query/query_validator_test.cc index 3f1250b7..aa86f53e 100644 --- a/backend/query/query_validator_test.cc +++ b/backend/query/query_validator_test.cc @@ -16,9 +16,14 @@ #include "backend/query/query_validator.h" +#include #include +#include #include +#include "zetasql/public/builtin_function.h" +#include "zetasql/public/types/type_factory.h" +#include "zetasql/resolved_ast/make_node_vector.h" #include "zetasql/resolved_ast/resolved_ast.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -36,6 +41,7 @@ namespace backend { namespace { +using zetasql::Function; using zetasql_base::testing::StatusIs; class QueryValidatorTest : public testing::Test { @@ -156,6 +162,72 @@ TEST_F(QueryValidatorTest, CollectEmulatorOnlyOptionsFromHints) { } } +TEST_F(QueryValidatorTest, ValidateDisableInlineHintReturnsOK) { + zetasql::TypeFactory type_factory; + std::map> functions; + zetasql::ZetaSQLBuiltinFunctionOptions options; + + 
zetasql::GetZetaSQLFunctions(&type_factory, options, &functions); + + zetasql::Function* substr = functions["substr"].get(); + const zetasql::FunctionSignature* signature = substr->GetSignature(0); + + std::unique_ptr resolved_function_call = + zetasql::MakeResolvedFunctionCall( + zetasql::types::StringType(), substr, *signature, + zetasql::MakeNodeVectorP( + zetasql::MakeResolvedLiteral(zetasql::Value::String("Hello")), + zetasql::MakeResolvedLiteral(zetasql::Value::Int32(0)), + zetasql::MakeResolvedLiteral(zetasql::Value::Int32(1))), + zetasql::ResolvedFunctionCall::DEFAULT_ERROR_MODE); + + resolved_function_call->add_hint_list(zetasql::MakeResolvedOption( + /*qualifier=*/"", /*name=*/"disable_inline", + zetasql::MakeResolvedLiteral(zetasql::Value::Bool(true)))); + + QueryEngineOptions opts; + QueryValidator validator{schema(), &opts}; + ZETASQL_ASSERT_OK(resolved_function_call->Accept(&validator)); +} + +TEST_F(QueryValidatorTest, HashJoinExecutionHintOnePassReturnsOk) { + QueryableTable table{schema()->FindTable("test_table"), /*reader=*/nullptr}; + auto resolved_join_scan = zetasql::MakeResolvedJoinScan(); + resolved_join_scan->add_hint_list(zetasql::MakeResolvedOption( + /*qualifier=*/"", /*name=*/"hash_join_execution", + zetasql::MakeResolvedLiteral(zetasql::Value::String("one_pass")))); + + QueryEngineOptions opts; + QueryValidator validator{schema(), &opts}; + ZETASQL_ASSERT_OK(resolved_join_scan->Accept(&validator)); +} + +TEST_F(QueryValidatorTest, HashJoinExecutionHintMultiPassReturnsOk) { + QueryableTable table{schema()->FindTable("test_table"), /*reader=*/nullptr}; + auto resolved_join_scan = zetasql::MakeResolvedJoinScan(); + resolved_join_scan->add_hint_list(zetasql::MakeResolvedOption( + /*qualifier=*/"", /*name=*/"hash_join_execution", + zetasql::MakeResolvedLiteral(zetasql::Value::String("multi_pass")))); + + QueryEngineOptions opts; + QueryValidator validator{schema(), &opts}; + ZETASQL_ASSERT_OK(resolved_join_scan->Accept(&validator)); +} + +TEST_F(QueryValidatorTest, HashJoinExecutionHintInvalidReturnsError) { + QueryableTable table{schema()->FindTable("test_table"), /*reader=*/nullptr}; + auto resolved_join_scan = zetasql::MakeResolvedJoinScan(); + resolved_join_scan->add_hint_list(zetasql::MakeResolvedOption( + /*qualifier=*/"", /*name=*/"hash_join_execution", + zetasql::MakeResolvedLiteral( + zetasql::Value::String("invalid_value")))); + + QueryEngineOptions opts; + QueryValidator validator{schema(), &opts}; + ASSERT_THAT(resolved_join_scan->Accept(&validator), + StatusIs(absl::StatusCode::kInvalidArgument)); +} + } // namespace } // namespace backend diff --git a/backend/query/queryable_column.h b/backend/query/queryable_column.h index eb193e6b..7afcbc87 100644 --- a/backend/query/queryable_column.h +++ b/backend/query/queryable_column.h @@ -17,8 +17,12 @@ #ifndef THIRD_PARTY_CLOUD_SPANNER_EMULATOR_BACKEND_QUERY_QUERYABLE_COLUMN_H_ #define THIRD_PARTY_CLOUD_SPANNER_EMULATOR_BACKEND_QUERY_QUERYABLE_COLUMN_H_ +#include +#include #include +#include +#include "zetasql/public/analyzer_output.h" #include "zetasql/public/catalog.h" #include "zetasql/public/type.h" #include "absl/strings/str_cat.h" @@ -37,6 +41,10 @@ class QueryableColumn : public zetasql::Column { public: QueryableColumn(const backend::Column* column) : wrapped_column_(column) {} + QueryableColumn(const backend::Column* column, + std::unique_ptr output) + : wrapped_column_(column), output_(std::move(output)) {} + std::string Name() const override { return wrapped_column_->Name(); } std::string FullName() 
const override { return wrapped_column_->FullName(); } @@ -49,11 +57,27 @@ class QueryableColumn : public zetasql::Column { return !wrapped_column_->is_generated(); } + bool HasDefaultValue() const override { + return wrapped_column_->has_default_value(); + } + + std::optional ExpressionString() const override { + return wrapped_column_->expression(); + } + + const zetasql::ResolvedExpr* Expression() const override { + if (!HasDefaultValue() || output_ == nullptr) return nullptr; + return output_->resolved_expr(); + } + const backend::Column* wrapped_column() const { return wrapped_column_; } private: // The underlying schema column. const backend::Column* wrapped_column_; + // The AnalyzerOutput that holds the column's ResolvedExpr, representing + // default value expression. + const std::unique_ptr output_ = nullptr; }; } // namespace backend diff --git a/backend/query/queryable_table.cc b/backend/query/queryable_table.cc index 21d5ecd5..335fc673 100644 --- a/backend/query/queryable_table.cc +++ b/backend/query/queryable_table.cc @@ -22,6 +22,9 @@ #include #include +#include "zetasql/public/analyzer.h" +#include "zetasql/public/analyzer_options.h" +#include "zetasql/public/analyzer_output.h" #include "zetasql/public/evaluator_table_iterator.h" #include "zetasql/public/value.h" #include "absl/memory/memory.h" @@ -105,6 +108,35 @@ QueryableTable::QueryableTable(const backend::Table* table, RowReader* reader) } } +QueryableTable::QueryableTable(const backend::Table* table, RowReader* reader, + const zetasql::AnalyzerOptions& options, + zetasql::Catalog* catalog, + zetasql::TypeFactory* type_factory) + : wrapped_table_(table), reader_(reader) { + for (const auto* column : table->columns()) { + std::unique_ptr output = nullptr; + if (type_factory != nullptr && column->has_default_value()) { + absl::Status s = + zetasql::AnalyzeExpression(column->expression().value(), options, + catalog, type_factory, &output); + ZETASQL_DCHECK(s.ok()) << "Failed to analyze default expression for column " + << column->FullName() << "\n"; + } + columns_.push_back( + std::make_unique(column, std::move(output))); + } + + // Populate primary_key_column_indexes_. + for (const auto& key_column : table->primary_key()) { + for (int i = 0; i < wrapped_table_->columns().size(); ++i) { + if (key_column->column() == wrapped_table_->columns()[i]) { + primary_key_column_indexes_.push_back(i); + break; + } + } + } +} + absl::StatusOr> QueryableTable::CreateEvaluatorTableIterator( absl::Span column_idxs) const { diff --git a/backend/query/queryable_table.h b/backend/query/queryable_table.h index 8c4e2785..e6516018 100644 --- a/backend/query/queryable_table.h +++ b/backend/query/queryable_table.h @@ -42,6 +42,11 @@ class QueryableTable : public zetasql::Table { public: QueryableTable(const backend::Table* table, RowReader* reader); + QueryableTable(const backend::Table* table, RowReader* reader, + const zetasql::AnalyzerOptions& options, + zetasql::Catalog* catalog, + zetasql::TypeFactory* type_factory); + std::string Name() const override { return wrapped_table_->Name(); } // FullName is used in debugging so it's OK to not include full path here. 
diff --git a/backend/schema/backfills/BUILD b/backend/schema/backfills/BUILD index b619ecdb..f624c3ea 100644 --- a/backend/schema/backfills/BUILD +++ b/backend/schema/backfills/BUILD @@ -66,12 +66,10 @@ cc_test( ":schema_backfillers", "//backend/database", "//backend/schema/catalog:schema", + "//backend/schema/updater:schema_updater", "//backend/transaction:read_only_transaction", - "//backend/transaction:read_write_transaction", "//common:errors", - "//tests/common:actions", "//tests/common:proto_matchers", - "//tests/common:test_schema_constructor", "@com_github_grpc_grpc//:grpc++", "@com_google_absl//absl/status", "@com_google_absl//absl/types:span", @@ -90,9 +88,9 @@ cc_test( "//backend/database", "//backend/datamodel:key_set", "//backend/schema/catalog:schema", + "//backend/schema/updater:schema_updater", "//backend/transaction:read_only_transaction", "//common:errors", - "//tests/common:actions", "//tests/common:proto_matchers", "//tests/common:scoped_feature_flags_setter", "@com_github_grpc_grpc//:grpc++", diff --git a/backend/schema/backfills/column_value_backfill_test.cc b/backend/schema/backfills/column_value_backfill_test.cc index bfc352e6..7d7067e4 100644 --- a/backend/schema/backfills/column_value_backfill_test.cc +++ b/backend/schema/backfills/column_value_backfill_test.cc @@ -32,9 +32,9 @@ #include "backend/database/database.h" #include "backend/datamodel/key_set.h" #include "backend/schema/catalog/schema.h" +#include "backend/schema/updater/schema_updater.h" #include "backend/transaction/options.h" #include "common/errors.h" -#include "tests/common/actions.h" #include "tests/common/scoped_feature_flags_setter.h" namespace google { @@ -59,15 +59,18 @@ class ColumnValueBackfillTest : public ::testing::Test { protected: void SetUp() override { - std::vector create_statements; - ZETASQL_ASSERT_OK_AND_ASSIGN(database_, Database::Create(&clock_, {R"( + std::vector create_statements = {R"( CREATE TABLE TestTable ( int64_col INT64, string_col STRING(10), string_array_col ARRAY, bytes_array_col ARRAY ) PRIMARY KEY (int64_col) - )"})); + )"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + database_, + Database::Create( + &clock_, SchemaChangeOperation{.statements = create_statements})); ZETASQL_ASSERT_OK_AND_ASSIGN(std::unique_ptr txn, database_->CreateReadWriteTransaction( @@ -100,8 +103,9 @@ class ColumnValueBackfillTest : public ::testing::Test { int num_succesful; absl::Status backfill_status; absl::Time update_time; - ZETASQL_RETURN_IF_ERROR(database_->UpdateSchema(update_statements, &num_succesful, - &update_time, &backfill_status)); + ZETASQL_RETURN_IF_ERROR(database_->UpdateSchema( + SchemaChangeOperation{.statements = update_statements}, &num_succesful, + &update_time, &backfill_status)); return backfill_status; } diff --git a/backend/schema/backfills/index_backfill_test.cc b/backend/schema/backfills/index_backfill_test.cc index 5656ef37..f1680bdf 100644 --- a/backend/schema/backfills/index_backfill_test.cc +++ b/backend/schema/backfills/index_backfill_test.cc @@ -27,10 +27,9 @@ #include "absl/types/span.h" #include "backend/database/database.h" #include "backend/schema/catalog/schema.h" +#include "backend/schema/updater/schema_updater.h" #include "backend/transaction/options.h" #include "common/errors.h" -#include "tests/common/actions.h" -#include "tests/common/schema_constructor.h" #include "absl/status/status.h" #include "zetasql/base/status_macros.h" @@ -53,23 +52,25 @@ class BackfillTest : public ::testing::Test { int num_succesful; absl::Status backfill_status; absl::Time 
update_time; - ZETASQL_RETURN_IF_ERROR(database_->UpdateSchema(update_statements, &num_succesful, - &update_time, &backfill_status)); + ZETASQL_RETURN_IF_ERROR(database_->UpdateSchema( + SchemaChangeOperation{.statements = update_statements}, &num_succesful, + &update_time, &backfill_status)); return backfill_status; } protected: void SetUp() override { - std::vector create_statements; - create_statements.push_back(R"( + std::vector create_statements = {R"( CREATE TABLE TestTable ( int64_col INT64, string_col STRING(MAX), another_string_col STRING(MAX) ) PRIMARY KEY (int64_col) - )"); - ZETASQL_ASSERT_OK_AND_ASSIGN(database_, - Database::Create(&clock_, create_statements)); + )"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + database_, + Database::Create( + &clock_, SchemaChangeOperation{.statements = create_statements})); index_update_statements_.push_back(R"( CREATE UNIQUE NULL_FILTERED INDEX TestIndex ON diff --git a/backend/schema/ddl/operations.proto b/backend/schema/ddl/operations.proto index d430ae64..fc033d51 100644 --- a/backend/schema/ddl/operations.proto +++ b/backend/schema/ddl/operations.proto @@ -253,14 +253,18 @@ message DropDatabase { message Analyze {} + message DDLStatement { oneof kind { CreateTable create_table = 1; CreateIndex create_index = 2; + AlterTable alter_table = 3; AlterIndex alter_index = 5; + DropTable drop_table = 6; DropIndex drop_index = 8; + Analyze analyze = 10; } } diff --git a/backend/schema/parser/BUILD b/backend/schema/parser/BUILD index 95192bbb..485d8ad1 100644 --- a/backend/schema/parser/BUILD +++ b/backend/schema/parser/BUILD @@ -15,7 +15,7 @@ # load( - "//backend/schema/parser:javacc_parser.bzl", + "//backend/common:javacc_parser.bzl", "generate_javacc_parser", ) diff --git a/backend/schema/parser/ddl_parser.jjt b/backend/schema/parser/ddl_parser.jjt index 56f23d05..0f6b576d 100644 --- a/backend/schema/parser/ddl_parser.jjt +++ b/backend/schema/parser/ddl_parser.jjt @@ -77,6 +77,7 @@ void create_statement() #void : ( create_database_statement() | create_table_statement() | create_index_statement() + ) } @@ -104,6 +105,7 @@ void create_table_statement() : [ LOOKAHEAD(2) "," row_deletion_policy_clause() ] } + void table_element() #void : {} { @@ -136,7 +138,7 @@ void column_def_alter() : LOOKAHEAD(2) ( options_clause() | column_default_clause() ) - | #drop_default_clause + | LOOKAHEAD(2) #drop_default_clause | column_def_alter_attrs() } @@ -154,6 +156,7 @@ void column_type() : | | "<" column_type() ">" + } void column_length() : @@ -312,7 +315,10 @@ void option_key_val() : void drop_statement() : {} { - ( #table | #index ) identifier() #name + + + (
#table | #index ) identifier() #name + } void alter_statement() #void : @@ -321,6 +327,7 @@ void alter_statement() #void : ( alter_database_statement() | alter_table_statement() + ) } @@ -334,22 +341,23 @@ void alter_table_statement() : {} {
(identifier() #table_name) - ( LOOKAHEAD(3) #add_column [LOOKAHEAD(2) ] column_def() - | LOOKAHEAD(3) #drop_constraint + ( LOOKAHEAD(3) #drop_constraint identifier() #constraint_name | LOOKAHEAD(3) #drop_row_deletion_policy | LOOKAHEAD(3) #drop_column [LOOKAHEAD(2) ] (identifier() #column_name) - | LOOKAHEAD(3) #drop_row_deletion_policy - | LOOKAHEAD(3) #alter_column [LOOKAHEAD(2) ] + | LOOKAHEAD(1) #alter_column [LOOKAHEAD(2) ] (identifier() #name) column_def_alter() | #set_on_delete on_delete_clause() | LOOKAHEAD(4) foreign_key() - | LOOKAHEAD(2) check_constraint() - | LOOKAHEAD(2) row_deletion_policy_clause() #add_row_deletion_policy + | LOOKAHEAD(4) check_constraint() + | LOOKAHEAD(4) row_deletion_policy_clause() #add_row_deletion_policy + // ADD COLUMN could mask the above ADD clauses, so putting it at the end. + | LOOKAHEAD(1) #add_column [LOOKAHEAD(3) ] column_def() | row_deletion_policy_clause() #replace_row_deletion_policy ) } + void analyze_statement() : {} { diff --git a/backend/schema/parser/ddl_parser_test.cc b/backend/schema/parser/ddl_parser_test.cc index 7bdf9f5a..4b8db94d 100644 --- a/backend/schema/parser/ddl_parser_test.cc +++ b/backend/schema/parser/ddl_parser_test.cc @@ -75,12 +75,12 @@ TEST(ParseCreateTable, CanParseCreateTableWithNoColumns) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" constraints { primary_key {} } } - )"))); + )pb"))); } TEST(ParseCreateTable, CannotParseCreateTableWithoutName) { @@ -112,7 +112,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyAKeyColumn) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -124,7 +124,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyAKeyColumn) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithOnlyAKeyColumnTrailingComma) { @@ -135,7 +135,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyAKeyColumnTrailingComma) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -147,7 +147,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyAKeyColumnTrailingComma) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithOnlyANonKeyColumn) { @@ -158,7 +158,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyANonKeyColumn) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -167,7 +167,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyANonKeyColumn) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithOnlyANonKeyColumnTrailingComma) { @@ -178,7 +178,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyANonKeyColumnTrailingComma) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -187,7 +187,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithOnlyANonKeyColumnTrailingComma) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithKeyAndNonKeyColumns) { @@ -199,7 +199,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithKeyAndNonKeyColumns) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -215,7 +215,7 @@ TEST(ParseCreateTable, 
CanParseCreateTableWithKeyAndNonKeyColumns) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithTwoKeyColumns) { @@ -227,7 +227,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithTwoKeyColumns) { ) PRIMARY KEY (UserId, Name) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -247,7 +247,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithTwoKeyColumns) { } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithTwoNonKeyColumns) { @@ -259,7 +259,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithTwoNonKeyColumns) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -272,7 +272,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithTwoNonKeyColumns) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithTwoKeyColumnsAndANonKeyColumn) { @@ -285,7 +285,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithTwoKeyColumnsAndANonKeyColumn) { ) PRIMARY KEY (UserId, Name) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -309,7 +309,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithTwoKeyColumnsAndANonKeyColumn) { } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithAKeyColumnAndTwoNonKeyColumns) { @@ -322,7 +322,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithAKeyColumnAndTwoNonKeyColumns) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -342,7 +342,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithAKeyColumnAndTwoNonKeyColumns) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateInterleavedTableWithNoColumns) { @@ -352,7 +352,7 @@ TEST(ParseCreateTable, CanParseCreateInterleavedTableWithNoColumns) { ) PRIMARY KEY (), INTERLEAVE IN PARENT Users ON DELETE CASCADE )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Albums" constraints { primary_key {} } @@ -364,7 +364,7 @@ TEST(ParseCreateTable, CanParseCreateInterleavedTableWithNoColumns) { } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateInterleavedTableWithKeyAndNonKeyColumns) { @@ -379,7 +379,7 @@ TEST(ParseCreateTable, CanParseCreateInterleavedTableWithKeyAndNonKeyColumns) { INTERLEAVE IN PARENT Users ON DELETE CASCADE )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Albums" columns { @@ -416,7 +416,7 @@ TEST(ParseCreateTable, CanParseCreateInterleavedTableWithKeyAndNonKeyColumns) { } } } - )"))); + )pb"))); } TEST(ParseCreateTable, @@ -427,7 +427,7 @@ TEST(ParseCreateTable, ) PRIMARY KEY (), INTERLEAVE IN PARENT Users ON DELETE NO ACTION )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Albums" constraints { primary_key {} } @@ -439,7 +439,7 @@ TEST(ParseCreateTable, } } } - )"))); + )pb"))); } TEST(ParseCreateTable, @@ -450,7 +450,7 @@ TEST(ParseCreateTable, ) PRIMARY KEY (), INTERLEAVE IN PARENT Users )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Albums" constraints { primary_key {} } @@ -462,7 +462,7 @@ TEST(ParseCreateTable, } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithAnArrayField) { @@ -474,7 +474,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithAnArrayField) { ) PRIMARY KEY (UserId) )"), 
IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -496,7 +496,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithAnArrayField) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithNotNullArrayField) { @@ -508,7 +508,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithNotNullArrayField) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -530,7 +530,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithNotNullArrayField) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithoutInterleaveClause) { @@ -542,7 +542,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithoutInterleaveClause) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -558,7 +558,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithoutInterleaveClause) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithForeignKeys) { @@ -739,7 +739,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithNumeric) { ) PRIMARY KEY (K) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "T" columns { @@ -762,7 +762,7 @@ TEST(ParseCreateTable, CanParseCreateTableWithNumeric) { } constraints { primary_key { key_part { key_column_name: "K" } } } } - )"))); + )pb"))); } TEST(ParseCreateTable, CanParseCreateTableWithRowDeletionPolicy) { @@ -1297,7 +1297,7 @@ TEST(ParseAlterTable, CanParseAlterColumn) { ALTER TABLE Users ALTER COLUMN Notes STRING(MAX) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( alter_table { table_name: "Users" alter_column { @@ -1309,7 +1309,7 @@ TEST(ParseAlterTable, CanParseAlterColumn) { } } } - )"))); + )pb"))); } TEST(ParseAlterTable, CanParseAlterColumnNotNull) { @@ -1318,7 +1318,7 @@ TEST(ParseAlterTable, CanParseAlterColumnNotNull) { ALTER TABLE Users ALTER COLUMN Notes STRING(MAX) NOT NULL )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( alter_table { table_name: "Users" alter_column { @@ -1331,7 +1331,7 @@ TEST(ParseAlterTable, CanParseAlterColumnNotNull) { } } } - )"))); + )pb"))); } TEST(ParseAlterTable, CanParseAlterColumnNamedColumn) { @@ -1341,7 +1341,7 @@ TEST(ParseAlterTable, CanParseAlterColumnNamedColumn) { ALTER TABLE Users ALTER COLUMN `COLUMN` STRING(MAX) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( alter_table { table_name: "Users" alter_column { @@ -1353,7 +1353,7 @@ TEST(ParseAlterTable, CanParseAlterColumnNamedColumn) { } } } - )"))); + )pb"))); // Columns named "COLUMN" can be modified even without quotes. 
EXPECT_THAT(ParseDDLStatement( @@ -1361,7 +1361,7 @@ TEST(ParseAlterTable, CanParseAlterColumnNamedColumn) { ALTER TABLE Users ALTER COLUMN COLUMN STRING(MAX) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( alter_table { table_name: "Users" alter_column { @@ -1373,7 +1373,7 @@ TEST(ParseAlterTable, CanParseAlterColumnNamedColumn) { } } } - )"))); + )pb"))); } TEST(ParseAlterTable, CannotParseAlterColumnMissingColumnName) { @@ -1430,7 +1430,7 @@ TEST(ParseAlterTable, CanParseSetOnDeleteNoAction) { ALTER TABLE Albums SET ON DELETE NO ACTION )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( alter_table { table_name: "Albums" alter_constraint { @@ -1438,7 +1438,7 @@ TEST(ParseAlterTable, CanParseSetOnDeleteNoAction) { constraint { interleave { on_delete { action: NO_ACTION } } } } } - )"))); + )pb"))); } TEST(ParseAlterTable, CanParseAlterTableWithRowDeletionPolicy) { @@ -1506,12 +1506,12 @@ TEST(Miscellaneous, CanParseExtraWhitespaceCharacters) { CREATE TABLE Users () PRIMARY KEY() )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" constraints { primary_key {} } } - )"))); + )pb"))); } TEST(Miscellaneous, CannotParseSmartQuotes) { @@ -1535,7 +1535,7 @@ TEST(Miscellaneous, CanParseMixedCaseStatements) { ) PRIMARY KEY (UserId) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -1551,7 +1551,7 @@ TEST(Miscellaneous, CanParseMixedCaseStatements) { primary_key { key_part { key_column_name: "UserId" } } } } - )"))); + )pb"))); EXPECT_THAT(ParseDDLStatement( R"( @@ -1564,7 +1564,7 @@ TEST(Miscellaneous, CanParseMixedCaseStatements) { INTERLEAVE in PARENT Users ON DELETE CASCADE )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Albums" columns { @@ -1601,7 +1601,7 @@ TEST(Miscellaneous, CanParseMixedCaseStatements) { } } } - )"))); + )pb"))); } TEST(Miscellaneous, CanParseCustomFieldLengths) { @@ -1617,7 +1617,7 @@ TEST(Miscellaneous, CanParseCustomFieldLengths) { ) PRIMARY KEY (Name) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Sizes" columns { @@ -1648,7 +1648,7 @@ TEST(Miscellaneous, CanParseCustomFieldLengths) { primary_key { key_part { key_column_name: "Name" } } } } - )"))); + )pb"))); } TEST(Miscellaneous, CanParseTimestamps) { @@ -1661,7 +1661,7 @@ TEST(Miscellaneous, CanParseTimestamps) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Sizes" columns { @@ -1678,7 +1678,7 @@ TEST(Miscellaneous, CanParseTimestamps) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(Miscellaneous, CannotParseStringFieldsWithoutLength) { @@ -1713,7 +1713,7 @@ TEST(Miscellaneous, CanParseQuotedIdentifiers) { ) PRIMARY KEY (`C`) )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "T" columns { @@ -1723,7 +1723,7 @@ TEST(Miscellaneous, CanParseQuotedIdentifiers) { } constraints { primary_key { key_part { key_column_name: "C" } } } } - )"))); + )pb"))); } // AllowCommitTimestamp @@ -1739,7 +1739,7 @@ TEST(AllowCommitTimestamp, CanParseSingleOption) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -1751,7 +1751,7 @@ TEST(AllowCommitTimestamp, CanParseSingleOption) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(AllowCommitTimestamp, CanClearOptionWithNull) { @@ -1765,7 +1765,7 @@ TEST(AllowCommitTimestamp, CanClearOptionWithNull) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( 
create_table { table_name: "Users" columns { @@ -1777,7 +1777,7 @@ TEST(AllowCommitTimestamp, CanClearOptionWithNull) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(AllowCommitTimestamp, CannotParseSingleInvalidOption) { @@ -1818,7 +1818,7 @@ TEST(AllowCommitTimestamp, CanParseMultipleOptions) { ) PRIMARY KEY () )"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( create_table { table_name: "Users" columns { @@ -1838,7 +1838,7 @@ TEST(AllowCommitTimestamp, CanParseMultipleOptions) { } constraints { primary_key {} } } - )"))); + )pb"))); } TEST(AllowCommitTimestamp, CannotParseMultipleOptionsWithTrailingComma) { @@ -1859,7 +1859,7 @@ TEST(AllowCommitTimestamp, SetThroughOptions) { ALTER TABLE Users ALTER COLUMN UpdateTs SET OPTIONS (allow_commit_timestamp = true))"), IsOkAndHolds(test::EqualsProto( - R"( + R"pb( alter_table { table_name: "Users" alter_column { @@ -1876,7 +1876,7 @@ TEST(AllowCommitTimestamp, SetThroughOptions) { } } } - )"))); + )pb"))); } TEST(AllowCommitTimestamp, CannotParseInvalidOptionValue) { @@ -2636,7 +2636,7 @@ TEST_F(CheckConstraint, ParseSyntaxErrorsInCheckConstraint) { EXPECT_THAT( ParseDDLStatement("ALTER TABLE T ADD CONSTRAINT GROUPS CHECK(B > `A`))"), StatusIs(absl::StatusCode::kInvalidArgument, - HasSubstr("Encountered 'GROUPS' while parsing: identifier"))); + HasSubstr("Encountered 'GROUPS' while parsing"))); EXPECT_THAT(ParseDDLStatement("ALTER TABLE T ADD CHECK(()"), StatusIs(absl::StatusCode::kInvalidArgument, diff --git a/backend/schema/parser/ddl_reserved_words.cc b/backend/schema/parser/ddl_reserved_words.cc index 199156dc..52512ab1 100644 --- a/backend/schema/parser/ddl_reserved_words.cc +++ b/backend/schema/parser/ddl_reserved_words.cc @@ -148,6 +148,7 @@ static const CaseInsensitiveStringSet* const pseudo_reserved_words = "FLOAT64", "FOREIGN", "INDEX", + "INSERT", "INT64", "INTERLEAVE", "JSON", @@ -168,6 +169,7 @@ static const CaseInsensitiveStringSet* const pseudo_reserved_words = "TABLE", "TIMESTAMP", "UNIQUE", + "UPDATE", }; } // namespace diff --git a/backend/schema/updater/BUILD b/backend/schema/updater/BUILD index 0bd61c14..c73b36b0 100644 --- a/backend/schema/updater/BUILD +++ b/backend/schema/updater/BUILD @@ -100,7 +100,6 @@ cc_library( ":global_schema_names", ":schema_validation_context", "//backend/common:ids", - "//backend/datamodel:types", "//backend/query:analyzer_options", "//backend/query:catalog", "//backend/query:query_engine_options", @@ -112,15 +111,12 @@ cc_library( "//backend/schema/graph:schema_graph", "//backend/schema/graph:schema_graph_editor", "//backend/schema/parser:ddl_parser", - "//backend/schema/parser:javacc_ddl_parser", "//backend/schema/verifiers:check_constraint_verifiers", "//backend/schema/verifiers:foreign_key_verifiers", - "//common:constants", "//common:errors", "//common:limits", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_set", - "@com_google_absl//absl/memory", "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", diff --git a/backend/schema/updater/schema_updater.cc b/backend/schema/updater/schema_updater.cc index 8bf8a612..f4fdee8c 100644 --- a/backend/schema/updater/schema_updater.cc +++ b/backend/schema/updater/schema_updater.cc @@ -36,7 +36,6 @@ #include "absl/status/statusor.h" #include "absl/strings/match.h" #include "absl/types/optional.h" -#include "backend/datamodel/types.h" #include "backend/query/analyzer_options.h" #include "backend/query/catalog.h" #include 
"backend/query/query_engine_options.h" @@ -55,14 +54,12 @@ #include "backend/schema/ddl/operations.pb.h" #include "backend/schema/graph/schema_graph.h" #include "backend/schema/graph/schema_graph_editor.h" -#include "backend/schema/parser/DDLParser.h" #include "backend/schema/parser/ddl_parser.h" #include "backend/schema/updater/ddl_type_conversion.h" #include "backend/schema/updater/global_schema_names.h" #include "backend/schema/updater/schema_validation_context.h" #include "backend/schema/verifiers/check_constraint_verifiers.h" #include "backend/schema/verifiers/foreign_key_verifiers.h" -#include "common/constants.h" #include "common/errors.h" #include "common/limits.h" #include "zetasql/base/status_macros.h" @@ -112,7 +109,7 @@ class SchemaUpdaterImpl { // Apply DDL statements returning the SchemaValidationContext containing // the schema change actions resulting from each statement. absl::StatusOr> ApplyDDLStatements( - absl::Span statements); + const SchemaChangeOperation& schema_change_operation); std::vector> GetIntermediateSchemas() { return std::move(intermediate_schemas_); @@ -395,10 +392,10 @@ SchemaUpdaterImpl::ApplyDDLStatement(absl::string_view statement) { absl::StatusOr> SchemaUpdaterImpl::ApplyDDLStatements( - absl::Span statements) { + const SchemaChangeOperation& schema_change_operation) { std::vector pending_work; - for (const auto& statement : statements) { + for (const auto& statement : schema_change_operation.statements) { ZETASQL_VLOG(2) << "Applying statement " << statement; SchemaValidationContext statement_context{ storage_, &global_names_, type_factory_, schema_change_timestamp_}; @@ -1670,9 +1667,9 @@ const Schema* SchemaUpdater::EmptySchema() { } absl::StatusOr> -SchemaUpdater::ValidateSchemaFromDDL(absl::Span statements, - const SchemaChangeContext& context, - const Schema* existing_schema) { +SchemaUpdater::ValidateSchemaFromDDL( + const SchemaChangeOperation& schema_change_operation, + const SchemaChangeContext& context, const Schema* existing_schema) { if (existing_schema == nullptr) { existing_schema = EmptySchema(); } @@ -1681,7 +1678,8 @@ SchemaUpdater::ValidateSchemaFromDDL(absl::Span statements, context.type_factory, context.table_id_generator, context.column_id_generator, context.storage, context.schema_change_timestamp, existing_schema)); - ZETASQL_ASSIGN_OR_RETURN(pending_work_, updater.ApplyDDLStatements(statements)); + ZETASQL_ASSIGN_OR_RETURN(pending_work_, + updater.ApplyDDLStatements(schema_change_operation)); intermediate_schemas_ = updater.GetIntermediateSchemas(); std::unique_ptr new_schema = nullptr; @@ -1704,14 +1702,16 @@ absl::Status SchemaUpdater::RunPendingActions(int* num_succesful) { } absl::StatusOr SchemaUpdater::UpdateSchemaFromDDL( - const Schema* existing_schema, absl::Span statements, + const Schema* existing_schema, + const SchemaChangeOperation& schema_change_operation, const SchemaChangeContext& context) { ZETASQL_ASSIGN_OR_RETURN(SchemaUpdaterImpl updater, SchemaUpdaterImpl::Build( context.type_factory, context.table_id_generator, context.column_id_generator, context.storage, context.schema_change_timestamp, existing_schema)); - ZETASQL_ASSIGN_OR_RETURN(pending_work_, updater.ApplyDDLStatements(statements)); + ZETASQL_ASSIGN_OR_RETURN(pending_work_, + updater.ApplyDDLStatements(schema_change_operation)); intermediate_schemas_ = updater.GetIntermediateSchemas(); // Use the schema snapshot for the last succesful statement. 
@@ -1731,10 +1731,12 @@ absl::StatusOr SchemaUpdater::UpdateSchemaFromDDL( } absl::StatusOr> -SchemaUpdater::CreateSchemaFromDDL(absl::Span statements, - const SchemaChangeContext& context) { - ZETASQL_ASSIGN_OR_RETURN(SchemaChangeResult result, - UpdateSchemaFromDDL(EmptySchema(), statements, context)); +SchemaUpdater::CreateSchemaFromDDL( + const SchemaChangeOperation& schema_change_operation, + const SchemaChangeContext& context) { + ZETASQL_ASSIGN_OR_RETURN( + SchemaChangeResult result, + UpdateSchemaFromDDL(EmptySchema(), schema_change_operation, context)); ZETASQL_RETURN_IF_ERROR(result.backfill_status); return std::move(result.updated_schema); } diff --git a/backend/schema/updater/schema_updater.h b/backend/schema/updater/schema_updater.h index 62282e85..1ab2c028 100644 --- a/backend/schema/updater/schema_updater.h +++ b/backend/schema/updater/schema_updater.h @@ -18,9 +18,10 @@ #define THIRD_PARTY_CLOUD_SPANNER_EMULATOR_BACKEND_SCHEMA_UPDATER_SCHEMA_UPDATER_H_ #include +#include +#include #include "zetasql/public/type.h" -#include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" @@ -37,6 +38,11 @@ namespace backend { static constexpr char kIndexDataTablePrefix[] = "_index_data_table_"; +// Container holding all the required inputs for processing a schema change. +struct SchemaChangeOperation { + absl::Span statements; +}; + // Database context within which a schema change is processed. struct SchemaChangeContext { // Type factory for the database. @@ -74,18 +80,19 @@ class SchemaUpdater { public: SchemaUpdater() = default; - // Creates a new Schema from `statements` or returns the error encountered - // while applying the first invalid statement. Also runs any backfill or - // data-dependent verification tasks resulting from the new schema such as - // creation of a new index. However, since the database will not contain any - // data at this point, none of the backfill tasks are expected to fail. + // Creates a new Schema from `schema_change_operation.statements` or returns + // the error encountered while applying the first invalid statement. Also runs + // any backfill or data-dependent verification tasks resulting from the new + // schema such as creation of a new index. However, since the database will + // not contain any data at this point, none of the backfill tasks are expected + // to fail. absl::StatusOr> CreateSchemaFromDDL( - absl::Span statements, + const SchemaChangeOperation& schema_change_operation, const SchemaChangeContext& context); - // Applies the DDL statements in `statements` on top of `existing_schema`. Any - // errors during semantic validation of the provided `statements` are - // communicated through the return status of the function. + // Applies the DDL statements in `schema_change_operation.statements` on top + // of `existing_schema`. Any errors during semantic validation of the provided + // `statements` are communicated through the return status of the function. // // If the set of statements is semantically valid, but results in a schema // verification/backfill error, then that is communicated through the returned @@ -94,14 +101,15 @@ class SchemaUpdater { // successfully applied statements returned in the `updated_schema` and // `num_successful_statements` members respectively. 
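The comment block above describes the reworked SchemaUpdater entry points, which now take a SchemaChangeOperation wrapper instead of a bare span of DDL statements. A minimal usage sketch under that assumption follows; `statements`, `context` (a populated SchemaChangeContext), and `existing_schema` are illustrative names assumed to already be in scope, not identifiers from this change:

    // Illustrative sketch only: wrap the DDL statements in SchemaChangeOperation
    // before handing them to the updater (`statements` is a std::vector<std::string>).
    backend::SchemaChangeOperation op{.statements = statements};
    backend::SchemaUpdater updater;
    ZETASQL_ASSIGN_OR_RETURN(
        backend::SchemaChangeResult result,
        updater.UpdateSchemaFromDDL(existing_schema, op, context));
    // Backfill/verification failures are reported separately from the call's
    // own status, so check the result's backfill_status as well.
    ZETASQL_RETURN_IF_ERROR(result.backfill_status);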
absl::StatusOr UpdateSchemaFromDDL( - const Schema* existing_schema, absl::Span statements, + const Schema* existing_schema, + const SchemaChangeOperation& schema_change_operation, const SchemaChangeContext& context); // Validates the given set DDL statements, producing a new schema with the // DDL statements applied. Does not run any backfill/verification tasks // entailed by `statements`. absl::StatusOr> ValidateSchemaFromDDL( - absl::Span statements, + const SchemaChangeOperation& schema_change_operation, const SchemaChangeContext& context, const Schema* existing_schema = nullptr); diff --git a/backend/schema/updater/schema_updater_tests/base.cc b/backend/schema/updater/schema_updater_tests/base.cc index 4a266590..9de4df01 100644 --- a/backend/schema/updater/schema_updater_tests/base.cc +++ b/backend/schema/updater/schema_updater_tests/base.cc @@ -20,6 +20,7 @@ #include #include "absl/status/statusor.h" +#include "backend/schema/updater/schema_updater.h" namespace google { namespace spanner { @@ -38,7 +39,8 @@ absl::StatusOr> SchemaUpdaterTest::UpdateSchema( SchemaChangeContext context{.type_factory = &type_factory_, .table_id_generator = &table_id_generator_, .column_id_generator = &column_id_generator_}; - return updater.ValidateSchemaFromDDL(statements, context, base_schema); + return updater.ValidateSchemaFromDDL( + SchemaChangeOperation{.statements = statements}, context, base_schema); } } // namespace test diff --git a/backend/schema/verifiers/BUILD b/backend/schema/verifiers/BUILD index e83bc28b..ca8d6fae 100644 --- a/backend/schema/verifiers/BUILD +++ b/backend/schema/verifiers/BUILD @@ -54,16 +54,10 @@ cc_test( deps = [ ":column_value_verifiers", "//backend/database", - "//backend/schema/builders:schema_builders", - "//backend/schema/catalog:schema", - "//backend/schema/updater:schema_validation_context", - "//backend/storage:in_memory_storage", "//common:clock", "//common:errors", "//tests/common:proto_matchers", "@com_github_grpc_grpc//:grpc++", - "@com_google_absl//absl/memory", - "@com_google_absl//absl/time", "@com_google_absl//absl/types:span", "@com_google_googletest//:gtest_main", "@com_google_zetasql//zetasql/base/testing:status_matchers", @@ -99,6 +93,7 @@ cc_test( "//tests/common:proto_matchers", "//tests/common:scoped_feature_flags_setter", "@com_github_grpc_grpc//:grpc++", + "@com_google_absl//absl/status", "@com_google_googletest//:gtest_main", "@com_google_zetasql//zetasql/base/testing:status_matchers", "@com_google_zetasql//zetasql/public:value", diff --git a/backend/schema/verifiers/check_constraint_verifiers_test.cc b/backend/schema/verifiers/check_constraint_verifiers_test.cc index 85338681..9ee4573c 100644 --- a/backend/schema/verifiers/check_constraint_verifiers_test.cc +++ b/backend/schema/verifiers/check_constraint_verifiers_test.cc @@ -18,6 +18,7 @@ #include #include +#include #include "zetasql/public/value.h" #include "gmock/gmock.h" @@ -29,6 +30,7 @@ #include "backend/transaction/read_write_transaction.h" #include "common/clock.h" #include "tests/common/scoped_feature_flags_setter.h" +#include "zetasql/base/status_macros.h" namespace google { namespace spanner { @@ -48,12 +50,15 @@ class CheckConstraintVerifiersTest : public ::testing::Test { protected: void SetUp() override { - ZETASQL_ASSERT_OK_AND_ASSIGN(database_, Database::Create(&clock_, {R"( + std::vector statements = {R"( CREATE TABLE T ( A INT64, B INT64, C INT64 AS (A + B) STORED, - ) PRIMARY KEY(A))"})); + ) PRIMARY KEY(A))"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + database_, 
Database::Create(&clock_, SchemaChangeOperation{ + .statements = statements})); ZETASQL_ASSERT_OK_AND_ASSIGN(std::unique_ptr txn, database_->CreateReadWriteTransaction( @@ -81,7 +86,8 @@ class CheckConstraintVerifiersTest : public ::testing::Test { absl::Status status; absl::Time timestamp; ZETASQL_RETURN_IF_ERROR( - database_->UpdateSchema(statements, &succesful, ×tamp, &status)); + database_->UpdateSchema(SchemaChangeOperation{.statements = statements}, + &succesful, ×tamp, &status)); return status; } diff --git a/backend/schema/verifiers/column_value_verifiers_test.cc b/backend/schema/verifiers/column_value_verifiers_test.cc index 7444c2b4..27ddc14a 100644 --- a/backend/schema/verifiers/column_value_verifiers_test.cc +++ b/backend/schema/verifiers/column_value_verifiers_test.cc @@ -18,6 +18,7 @@ #include #include +#include #include "zetasql/public/type.h" #include "zetasql/public/value.h" @@ -25,15 +26,8 @@ #include "gtest/gtest.h" #include "zetasql/base/testing/status_matchers.h" #include "tests/common/proto_matchers.h" -#include "absl/memory/memory.h" -#include "absl/time/time.h" #include "absl/types/span.h" #include "backend/database/database.h" -#include "backend/schema/builders/column_builder.h" -#include "backend/schema/catalog/column.h" -#include "backend/schema/catalog/table.h" -#include "backend/schema/updater/schema_validation_context.h" -#include "backend/storage/in_memory_storage.h" #include "common/clock.h" #include "common/errors.h" @@ -52,19 +46,20 @@ using zetasql::values::Timestamp; class ColumnValueVerifiersTest : public ::testing::Test { public: - ColumnValueVerifiersTest() {} + ColumnValueVerifiersTest() = default; absl::Status UpdateSchema(absl::Span update_statements) { int num_succesful; absl::Status backfill_status; absl::Time update_time; - ZETASQL_RETURN_IF_ERROR(database_->UpdateSchema(update_statements, &num_succesful, - &update_time, &backfill_status)); + ZETASQL_RETURN_IF_ERROR(database_->UpdateSchema( + SchemaChangeOperation{.statements = update_statements}, &num_succesful, + &update_time, &backfill_status)); return backfill_status; } void SetUp() override { - ZETASQL_ASSERT_OK_AND_ASSIGN(database_, Database::Create(&clock_, {R"( + std::vector statements = {R"( CREATE TABLE TestTable ( int64_col INT64, string_col STRING(30), @@ -72,7 +67,10 @@ class ColumnValueVerifiersTest : public ::testing::Test { bytes_array_col ARRAY, timestamp_col TIMESTAMP ) PRIMARY KEY (int64_col) - )"})); + )"}; + ZETASQL_ASSERT_OK_AND_ASSIGN( + database_, Database::Create(&clock_, SchemaChangeOperation{ + .statements = statements})); ZETASQL_ASSERT_OK_AND_ASSIGN(std::unique_ptr txn, database_->CreateReadWriteTransaction( diff --git a/backend/schema/verifiers/foreign_key_verifiers_test.cc b/backend/schema/verifiers/foreign_key_verifiers_test.cc index b202f3dc..6d84f39f 100644 --- a/backend/schema/verifiers/foreign_key_verifiers_test.cc +++ b/backend/schema/verifiers/foreign_key_verifiers_test.cc @@ -70,7 +70,9 @@ class ForeignKeyVerifiersTest : public ::testing::Test { } absl::Status CreateDatabase(const std::vector& statements) { - ZETASQL_ASSIGN_OR_RETURN(database_, Database::Create(&clock_, statements)); + ZETASQL_ASSIGN_OR_RETURN(database_, + Database::Create(&clock_, SchemaChangeOperation{ + .statements = statements})); return absl::OkStatus(); } @@ -79,7 +81,8 @@ class ForeignKeyVerifiersTest : public ::testing::Test { absl::Status status; absl::Time timestamp; ZETASQL_RETURN_IF_ERROR( - database_->UpdateSchema(statements, &succesful, ×tamp, &status)); + 
database_->UpdateSchema(SchemaChangeOperation{.statements = statements}, + &succesful, ×tamp, &status)); return status; } diff --git a/build/bazel/zetasql.patch b/build/bazel/zetasql.patch index 0fc15fbd..0a2a253f 100644 --- a/build/bazel/zetasql.patch +++ b/build/bazel/zetasql.patch @@ -22,13 +22,3 @@ import operator import re ---- zetasql/resolved_ast/BUILD -+++ zetasql/resolved_ast/BUILD -@@ -33,6 +33,7 @@ py_binary( - "@markupsafe//:markupsafe", - "//zetasql/parser:generator_utils", - ], -+ python_version = "PY3", - ) - - gen_resolved_ast_files( diff --git a/build/docker/Dockerfile.ubuntu b/build/docker/Dockerfile.ubuntu index 8fcdfbc2..f2b33ff2 100644 --- a/build/docker/Dockerfile.ubuntu +++ b/build/docker/Dockerfile.ubuntu @@ -34,18 +34,8 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test && \ --slave /usr/bin/g++ g++ /usr/bin/g++-8 && \ update-alternatives --set gcc /usr/bin/gcc-8 -# Copy over the emulator code base into the container. We explicitly copy only -# the required files to maximize chances that the layer will be cached. There -# does not seem to be a nicer way to do this than multiple COPY commands as -# COPY copies the contents of the source, not the directory itself. -COPY BUILD.bazel WORKSPACE maven_install.json .bazelrc src/ -COPY common src/common/ -COPY gateway src/gateway/ -COPY frontend src/frontend/ -COPY backend src/backend/ -COPY binaries src/binaries/ -COPY tests src/tests/ -COPY build/bazel src/build/bazel/ +# Copy over the emulator code base into the container. +COPY . src/ # Build the emulator. RUN cd src && \ diff --git a/build/kokoro/gcp_ubuntu/continuous.cfg b/build/kokoro/gcp_ubuntu/continuous_cpp.cfg similarity index 95% rename from build/kokoro/gcp_ubuntu/continuous.cfg rename to build/kokoro/gcp_ubuntu/continuous_cpp.cfg index 9cdb4af8..73f15fe4 100644 --- a/build/kokoro/gcp_ubuntu/continuous.cfg +++ b/build/kokoro/gcp_ubuntu/continuous_cpp.cfg @@ -23,7 +23,7 @@ build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" # This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). env_vars { key: "CLIENT_INTEGRATION_TESTS" - value: "nodejs,java,go,cpp,py,php,ruby,csharp" + value: "cpp" } action { diff --git a/build/kokoro/gcp_ubuntu/continuous_csharp.cfg b/build/kokoro/gcp_ubuntu/continuous_csharp.cfg new file mode 100644 index 00000000..179d281c --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_csharp.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). 
+env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "csharp" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/continuous_go.cfg b/build/kokoro/gcp_ubuntu/continuous_go.cfg new file mode 100644 index 00000000..e161e3e9 --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_go.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). +env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "go" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/continuous_java.cfg b/build/kokoro/gcp_ubuntu/continuous_java.cfg new file mode 100644 index 00000000..00b666d9 --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_java.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). +env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "java" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/continuous_nodejs.cfg b/build/kokoro/gcp_ubuntu/continuous_nodejs.cfg new file mode 100644 index 00000000..f1e1d7ef --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_nodejs.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). +env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "nodejs" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/continuous_php.cfg b/build/kokoro/gcp_ubuntu/continuous_php.cfg new file mode 100644 index 00000000..cc18e8d8 --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_php.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). +env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "php" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/continuous_py.cfg b/build/kokoro/gcp_ubuntu/continuous_py.cfg new file mode 100644 index 00000000..fcb915c1 --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_py.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). 
+env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "py" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/continuous_ruby.cfg b/build/kokoro/gcp_ubuntu/continuous_ruby.cfg new file mode 100644 index 00000000..d2e1b523 --- /dev/null +++ b/build/kokoro/gcp_ubuntu/continuous_ruby.cfg @@ -0,0 +1,35 @@ +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Format: //devtools/kokoro/config/proto/build.proto + +# For non-piper scms, the prefix directory name in this path is determined by +# the scm name specified in the job configuration. +build_file: "cloud-spanner-emulator/build/kokoro/gcp_ubuntu/docker_test.sh" + +# This is a comma separated list of all client integration tests that will be run (e.g. go,py,java,...). +env_vars { + key: "CLIENT_INTEGRATION_TESTS" + value: "ruby" +} + +action { + define_artifacts { + regex: "bin/**" + regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.log" + } +} diff --git a/build/kokoro/gcp_ubuntu/docker_test.sh b/build/kokoro/gcp_ubuntu/docker_test.sh index 5869662e..bba461d4 100755 --- a/build/kokoro/gcp_ubuntu/docker_test.sh +++ b/build/kokoro/gcp_ubuntu/docker_test.sh @@ -108,7 +108,7 @@ IFS=',' for client in $CLIENT_INTEGRATION_TESTS do if [[ $client == "go" ]]; then - SHA=734a22d5f6ecb1b1ec86302c7e38c461efd2260c3de29b6ad9e29df79632bed7 + SHA=4f813b29e595e0069502abc59d7528c2a6f96d35c4e864ccbad1897e7e22261e elif [[ $client == "java" ]]; then SHA=b0049515bf933ed93b9956f781a0126070b5690ef7f3800e048f063ee4ab02c7 elif [[ $client == "cpp" ]]; then diff --git a/common/BUILD b/common/BUILD index ebc79ef1..b1e8b93d 100644 --- a/common/BUILD +++ b/common/BUILD @@ -60,8 +60,11 @@ cc_library( ":constants", ":limits", "//backend/common:ids", + "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/status", "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:cord", + "@com_google_absl//absl/time", "@com_google_googleapis//google/rpc:error_details_cc_proto", ], ) diff --git a/common/errors.cc b/common/errors.cc index 0b38cb39..76b16e72 100644 --- a/common/errors.cc +++ b/common/errors.cc @@ -21,14 +21,15 @@ #include "google/rpc/error_details.pb.h" #include "absl/status/status.h" +#include "absl/strings/cord.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" +#include "absl/time/time.h" #include "backend/common/ids.h" #include "common/constants.h" #include "common/limits.h" -#include "absl/status/status.h" namespace google { namespace spanner { @@ -112,6 +113,20 @@ absl::Status InvalidInstanceName(absl::string_view instance_id) { instance_id)); } +absl::Status InvalidCreateInstanceRequestUnitsNotBoth() { + return absl::Status(absl::StatusCode::kInvalidArgument, + "Invalid CreateInstance request. 
Only one of nodes or " + "processing units should be specified."); +} + +absl::Status InvalidCreateInstanceRequestUnitsMultiple() { + return absl::Status( + absl::StatusCode::kInvalidArgument, + "Invalid CreateInstance request. Processing units should be " + "multiple of 100 for values below 1000 and multiples of " + "1000 for values above 1000."); +} + // Database errors. absl::Status InvalidDatabaseURI(absl::string_view uri) { return absl::Status(absl::StatusCode::kInvalidArgument, @@ -2244,7 +2259,6 @@ absl::Status ForeignKeyRowDeletionPolicyAddNotAllowed( "referenced by one or more foreign keys: `$1`.", table_name, foreign_keys)); } - } // namespace error } // namespace emulator } // namespace spanner diff --git a/common/errors.h b/common/errors.h index 26c52537..5221060b 100644 --- a/common/errors.h +++ b/common/errors.h @@ -17,10 +17,13 @@ #ifndef THIRD_PARTY_CLOUD_SPANNER_EMULATOR_COMMON_ERRORS_H_ #define THIRD_PARTY_CLOUD_SPANNER_EMULATOR_COMMON_ERRORS_H_ +#include +#include + #include "absl/status/status.h" #include "absl/strings/string_view.h" +#include "absl/time/time.h" #include "backend/common/ids.h" -#include "absl/status/status.h" namespace google { namespace spanner { @@ -46,6 +49,8 @@ absl::Status InstanceAlreadyExists(absl::string_view uri); absl::Status InstanceNameMismatch(absl::string_view uri); absl::Status InstanceUpdatesNotSupported(); absl::Status InvalidInstanceName(absl::string_view instance_id); +absl::Status InvalidCreateInstanceRequestUnitsNotBoth(); +absl::Status InvalidCreateInstanceRequestUnitsMultiple(); // Database errors. absl::Status InvalidDatabaseURI(absl::string_view uri); diff --git a/frontend/collections/BUILD b/frontend/collections/BUILD index be449f2c..c8c1428c 100644 --- a/frontend/collections/BUILD +++ b/frontend/collections/BUILD @@ -24,6 +24,7 @@ cc_library( hdrs = ["database_manager.h"], deps = [ "//backend/database", + "//backend/schema/updater:schema_updater", "//common:clock", "//common:errors", "//common:limits", diff --git a/frontend/collections/database_manager.cc b/frontend/collections/database_manager.cc index f21d373c..c6786020 100644 --- a/frontend/collections/database_manager.cc +++ b/frontend/collections/database_manager.cc @@ -61,7 +61,7 @@ std::vector> GetDatabasesByInstance( absl::StatusOr> DatabaseManager::CreateDatabase( const std::string& database_uri, - const std::vector& create_statements) { + const backend::SchemaChangeOperation& schema_change_operation) { // Perform bulk of the work outside the database manager lock to allow // CreateDatabase calls to execute in parallel. A common test pattern is to // Create/Drop a database per unit test, and run unit tests in parallel. 
So @@ -72,7 +72,7 @@ absl::StatusOr> DatabaseManager::CreateDatabase( std::string instance_uri = MakeInstanceUri(project_id, instance_id); ZETASQL_ASSIGN_OR_RETURN(std::unique_ptr backend_db, - backend::Database::Create(clock_, create_statements)); + backend::Database::Create(clock_, schema_change_operation)); auto database = std::make_shared( database_uri, std::move(backend_db), clock_->Now()); diff --git a/frontend/collections/database_manager.h b/frontend/collections/database_manager.h index 319c2978..59a01743 100644 --- a/frontend/collections/database_manager.h +++ b/frontend/collections/database_manager.h @@ -26,6 +26,7 @@ #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/synchronization/mutex.h" +#include "backend/schema/updater/schema_updater.h" #include "common/clock.h" #include "frontend/entities/database.h" #include "absl/status/status.h" @@ -43,7 +44,7 @@ class DatabaseManager { // Creates a database with a schema initialized from `create_statements`. absl::StatusOr> CreateDatabase( const std::string& database_uri, - const std::vector& create_statements) + const backend::SchemaChangeOperation& schema_change_operation) ABSL_LOCKS_EXCLUDED(mu_); // Returns a database with the given URI. diff --git a/frontend/collections/database_manager_test.cc b/frontend/collections/database_manager_test.cc index 8194e57e..6c7dbd86 100644 --- a/frontend/collections/database_manager_test.cc +++ b/frontend/collections/database_manager_test.cc @@ -42,28 +42,29 @@ class DatabaseManagerTest : public testing::Test { Clock clock_; DatabaseManager database_manager_; const std::string database_uri_; - const std::vector empty_schema_; + const backend::SchemaChangeOperation empty_schema_operation_; }; TEST_F(DatabaseManagerTest, CreateNewDatabase) { ZETASQL_ASSERT_OK_AND_ASSIGN( std::shared_ptr database, - database_manager_.CreateDatabase(database_uri_, empty_schema_)); + database_manager_.CreateDatabase(database_uri_, empty_schema_operation_)); EXPECT_EQ(database->database_uri(), database_uri_); } TEST_F(DatabaseManagerTest, CreateExistingDatabaseUriFailsWithAlreadyExists) { ZETASQL_ASSERT_OK_AND_ASSIGN( std::shared_ptr database, - database_manager_.CreateDatabase(database_uri_, empty_schema_)); - EXPECT_THAT(database_manager_.CreateDatabase(database_uri_, empty_schema_), - zetasql_base::testing::StatusIs(absl::StatusCode::kAlreadyExists)); + database_manager_.CreateDatabase(database_uri_, empty_schema_operation_)); + EXPECT_THAT( + database_manager_.CreateDatabase(database_uri_, empty_schema_operation_), + zetasql_base::testing::StatusIs(absl::StatusCode::kAlreadyExists)); } TEST_F(DatabaseManagerTest, GetExistingDatabase) { ZETASQL_ASSERT_OK_AND_ASSIGN( std::shared_ptr database, - database_manager_.CreateDatabase(database_uri_, empty_schema_)); + database_manager_.CreateDatabase(database_uri_, empty_schema_operation_)); ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr actual_database, database_manager_.GetDatabase(database_uri_)); EXPECT_EQ(actual_database->database_uri(), database_uri_); @@ -77,7 +78,7 @@ TEST_F(DatabaseManagerTest, GetNonExistingDatabaseReturnsNotFound) { TEST_F(DatabaseManagerTest, DeleteExistingDatabase) { ZETASQL_ASSERT_OK_AND_ASSIGN( std::shared_ptr database, - database_manager_.CreateDatabase(database_uri_, empty_schema_)); + database_manager_.CreateDatabase(database_uri_, empty_schema_operation_)); ZETASQL_EXPECT_OK(database_manager_.DeleteDatabase(database_uri_)); EXPECT_THAT(database_manager_.GetDatabase(database_uri_), 
zetasql_base::testing::StatusIs(absl::StatusCode::kNotFound)); @@ -90,9 +91,9 @@ TEST_F(DatabaseManagerTest, ListDatabase) { for (int i = 0; i < num_databases; i++) { std::string database_uri = absl::StrCat(instance_uri, "/databases/database-", i); - ZETASQL_ASSERT_OK_AND_ASSIGN( - std::shared_ptr database, - database_manager_.CreateDatabase(database_uri, empty_schema_)); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr database, + database_manager_.CreateDatabase( + database_uri, empty_schema_operation_)); } ZETASQL_ASSERT_OK_AND_ASSIGN(std::vector> databases, database_manager_.ListDatabases(instance_uri)); @@ -109,9 +110,10 @@ TEST_F(DatabaseManagerTest, ListDatabaseWithSimilarInstanceUri) { ZETASQL_ASSERT_OK_AND_ASSIGN( std::shared_ptr database, - database_manager_.CreateDatabase(database_uri_, empty_schema_)); - ZETASQL_ASSERT_OK_AND_ASSIGN(database, database_manager_.CreateDatabase( - similar_database_uri, empty_schema_)); + database_manager_.CreateDatabase(database_uri_, empty_schema_operation_)); + ZETASQL_ASSERT_OK_AND_ASSIGN( + database, database_manager_.CreateDatabase(similar_database_uri, + empty_schema_operation_)); ZETASQL_ASSERT_OK_AND_ASSIGN(std::vector> databases, database_manager_.ListDatabases( "projects/test-p/instances/test-instance")); @@ -126,28 +128,29 @@ TEST_F(DatabaseManagerTest, DatabaseQuotaIsEnforced) { // Create 100 databases. for (int i = 1; i <= 100; ++i) { std::string database_uri = absl::StrCat(database_uri_prefix, i); - ZETASQL_ASSERT_OK_AND_ASSIGN( - std::shared_ptr database, - database_manager_.CreateDatabase(database_uri, empty_schema_)); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr database, + database_manager_.CreateDatabase( + database_uri, empty_schema_operation_)); } // The next database creation should fail. - EXPECT_THAT(database_manager_.CreateDatabase( - absl::StrCat(database_uri_prefix, 101), empty_schema_), - zetasql_base::testing::StatusIs(absl::StatusCode::kResourceExhausted)); + EXPECT_THAT( + database_manager_.CreateDatabase(absl::StrCat(database_uri_prefix, 101), + empty_schema_operation_), + zetasql_base::testing::StatusIs(absl::StatusCode::kResourceExhausted)); // But creating a database in another instance should not fail. ZETASQL_EXPECT_OK(database_manager_.CreateDatabase( absl::StrCat("projects/test-project/instances/test-instance-2/databases/" "test-database-", 101), - empty_schema_)); + empty_schema_operation_)); // If we clear some quota, we can create a database again. 
ZETASQL_EXPECT_OK( database_manager_.DeleteDatabase(absl::StrCat(database_uri_prefix, 100))); ZETASQL_EXPECT_OK(database_manager_.CreateDatabase( - absl::StrCat(database_uri_prefix, 101), empty_schema_)); + absl::StrCat(database_uri_prefix, 101), empty_schema_operation_)); } } // namespace frontend diff --git a/frontend/collections/instance_manager.cc b/frontend/collections/instance_manager.cc index b6de4dd7..d19b174f 100644 --- a/frontend/collections/instance_manager.cc +++ b/frontend/collections/instance_manager.cc @@ -69,12 +69,31 @@ absl::StatusOr> InstanceManager::CreateInstance( const std::string& instance_uri, const instance_api::Instance& instance_proto) { absl::MutexLock lock(&mu_); + if (instance_proto.node_count() > 0 && + instance_proto.processing_units() > 0) { + return error::InvalidCreateInstanceRequestUnitsNotBoth(); + } + if (instance_proto.processing_units() > 0 && + instance_proto.processing_units() < 1000 && + instance_proto.processing_units() % 100 != 0) { + return error::InvalidCreateInstanceRequestUnitsMultiple(); + } + if (instance_proto.processing_units() > 1000 && + instance_proto.processing_units() % 1000 != 0) { + return error::InvalidCreateInstanceRequestUnitsMultiple(); + } + int32_t processing_units; + if (instance_proto.node_count() > 0) { + processing_units = instance_proto.node_count() * 1000; + } else { + processing_units = instance_proto.processing_units(); + } Labels labels(instance_proto.labels().begin(), instance_proto.labels().end()); auto inserted = instances_.insert( {instance_uri, std::make_shared( instance_uri, instance_proto.config(), instance_proto.display_name(), - instance_proto.node_count(), labels, zetasql_base::Clock::RealClock())}); + processing_units, labels, zetasql_base::Clock::RealClock())}); if (!inserted.second) { return error::InstanceAlreadyExists(instance_uri); } diff --git a/frontend/collections/instance_manager_test.cc b/frontend/collections/instance_manager_test.cc index 7705b483..398e035f 100644 --- a/frontend/collections/instance_manager_test.cc +++ b/frontend/collections/instance_manager_test.cc @@ -35,19 +35,21 @@ namespace instance_api = ::google::spanner::admin::instance::v1; using ::google::spanner::emulator::test::EqualsProto; using ::google::spanner::emulator::test::proto::Partially; +using ::testing::MatchesRegex; +using ::zetasql_base::testing::StatusIs; -TEST(InstanceManagerTest, CreateInstance) { +TEST(InstanceManagerTest, CreateInstanceWithNode) { InstanceManager instance_manager; - ZETASQL_ASSERT_OK_AND_ASSIGN( - std::shared_ptr instance, - instance_manager.CreateInstance( - "projects/123/instances/456", PARSE_TEXT_PROTO(R"( - config: 'projects/123/instanceConfigs/emulator-config' - display_name: 'Test Instance' - node_count: 3 - )"))); + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_name("projects/123/instances/456"); + instance_proto.set_state(admin::instance::v1::Instance::READY); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_node_count(3); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr instance, + instance_manager.CreateInstance( + "projects/123/instances/456", instance_proto)); - instance_api::Instance instance_proto; instance->ToProto(&instance_proto); EXPECT_TRUE(instance_proto.has_create_time()); EXPECT_TRUE(instance_proto.has_update_time()); @@ -56,18 +58,147 @@ TEST(InstanceManagerTest, CreateInstance) { config: 'projects/123/instanceConfigs/emulator-config' display_name: 'Test 
Instance' node_count: 3 + processing_units: 3000 + state: READY + )pb"))); +} + +TEST(InstanceManagerTest, CreateInstanceWithTime) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_node_count(3); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr instance, + instance_manager.CreateInstance( + "projects/123/instances/456", instance_proto)); + + instance->ToProto(&instance_proto); + EXPECT_TRUE(instance_proto.has_create_time()); + EXPECT_TRUE(instance_proto.has_update_time()); + EXPECT_THAT(instance_proto, Partially(EqualsProto(R"pb( + name: 'projects/123/instances/456' + config: 'projects/123/instanceConfigs/emulator-config' + display_name: 'Test Instance' + node_count: 3 + state: READY + )pb"))); +} + +TEST(InstanceManagerTest, CreateInstanceWithMultipleof100ProcessingUnits) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_processing_units(300); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr instance, + instance_manager.CreateInstance( + "projects/123/instances/456", instance_proto)); + + instance->ToProto(&instance_proto); + EXPECT_THAT(instance_proto, Partially(EqualsProto(R"pb( + name: 'projects/123/instances/456' + config: 'projects/123/instanceConfigs/emulator-config' + display_name: 'Test Instance' + processing_units: 300 + state: READY + )pb"))); +} + +TEST(InstanceManagerTest, CreateInstanceWithMultipleof1000ProcessingUnits) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_processing_units(2000); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr instance, + instance_manager.CreateInstance( + "projects/123/instances/456", instance_proto)); + + instance->ToProto(&instance_proto); + EXPECT_THAT(instance_proto, Partially(EqualsProto(R"pb( + name: 'projects/123/instances/456' + config: 'projects/123/instanceConfigs/emulator-config' + display_name: 'Test Instance' + node_count: 2 + processing_units: 2000 state: READY )pb"))); } +TEST(InstanceManagerTest, CreateInstanceWithoutEitherNodesOrProcessingUnits) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr instance, + instance_manager.CreateInstance( + "projects/123/instances/456", instance_proto)); + instance->ToProto(&instance_proto); + EXPECT_THAT(instance_proto, Partially(EqualsProto(R"pb( + name: 'projects/123/instances/456' + config: 'projects/123/instanceConfigs/emulator-config' + display_name: 'Test Instance' + state: READY + )pb"))); +} + +TEST(InstanceManagerTest, CannotCreateInstanceWithBothNodesAndProcessingUnits) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_node_count(3); + instance_proto.set_processing_units(300); + EXPECT_THAT(instance_manager.CreateInstance("projects/123/instances/456", 
+ instance_proto), + StatusIs(absl::StatusCode::kInvalidArgument, + MatchesRegex(".*Only one of nodes or " + "processing units should be specified.*"))); +} + +TEST(InstanceManagerTest, + CannotCreateInstanceWithNonMultiple100ProcessingUnits) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_processing_units(250); + EXPECT_THAT( + instance_manager.CreateInstance("projects/123/instances/456", + instance_proto), + StatusIs( + absl::StatusCode::kInvalidArgument, + MatchesRegex(".*Processing units should be " + "multiple of 100 for values below 1000 and multiples of " + "1000 for values above 1000.*"))); +} + +TEST(InstanceManagerTest, + CannotCreateInstanceWithNonMultiple1000ProcessingUnits) { + InstanceManager instance_manager; + admin::instance::v1::Instance instance_proto; + instance_proto.set_config("projects/123/instanceConfigs/emulator-config"); + instance_proto.set_display_name("Test Instance"); + instance_proto.set_processing_units(1500); + EXPECT_THAT( + instance_manager.CreateInstance("projects/123/instances/456", + instance_proto), + StatusIs( + absl::StatusCode::kInvalidArgument, + MatchesRegex(".*Processing units should be " + "multiple of 100 for values below 1000 and multiples of " + "1000 for values above 1000.*"))); +} + TEST(InstanceManagerTest, GetInstance) { InstanceManager instance_manager; ZETASQL_ASSERT_OK(instance_manager.CreateInstance( - "projects/123/instances/456", PARSE_TEXT_PROTO(R"( + "projects/123/instances/456", PARSE_TEXT_PROTO(R"pb( config: 'projects/123/instanceConfigs/emulator-config' display_name: 'Test Instance' node_count: 3 - )"))); + )pb"))); ZETASQL_ASSERT_OK_AND_ASSIGN( std::shared_ptr instance, @@ -89,17 +220,17 @@ TEST(InstanceManagerTest, GetInstance) { TEST(InstanceManagerTest, ListInstances) { InstanceManager instance_manager; ZETASQL_ASSERT_OK(instance_manager.CreateInstance( - "projects/123/instances/456", PARSE_TEXT_PROTO(R"( + "projects/123/instances/456", PARSE_TEXT_PROTO(R"pb( config: 'projects/123/instanceConfigs/emulator-config' display_name: 'Test Instance' node_count: 3 - )"))); + )pb"))); ZETASQL_ASSERT_OK(instance_manager.CreateInstance( - "projects/123/instances/789", PARSE_TEXT_PROTO(R"( + "projects/123/instances/789", PARSE_TEXT_PROTO(R"pb( config: 'projects/123/instanceConfigs/emulator-config' display_name: 'Test Instance' node_count: 6 - )"))); + )pb"))); ZETASQL_ASSERT_OK_AND_ASSIGN(std::vector> instances, instance_manager.ListInstances("projects/123")); @@ -131,18 +262,17 @@ TEST(InstanceManagerTest, ListInstances) { TEST(InstanceManagerTest, DeleteInstance) { InstanceManager instance_manager; ZETASQL_ASSERT_OK(instance_manager.CreateInstance( - "projects/123/instances/456", PARSE_TEXT_PROTO(R"( + "projects/123/instances/456", PARSE_TEXT_PROTO(R"pb( config: 'projects/123/instanceConfigs/emulator-config' display_name: 'Test Instance' node_count: 6 - )"))); + )pb"))); instance_manager.DeleteInstance("projects/123/instances/456"); EXPECT_THAT(instance_manager.GetInstance("projects/123/instances/456"), - zetasql_base::testing::StatusIs( - absl::StatusCode::kNotFound, - testing::MatchesRegex(".*Instance not found.*"))); + StatusIs(absl::StatusCode::kNotFound, + MatchesRegex(".*Instance not found.*"))); } } // namespace diff --git a/frontend/entities/instance.cc b/frontend/entities/instance.cc index dd65a51b..17a2dcac 100644 --- 
a/frontend/entities/instance.cc +++ b/frontend/entities/instance.cc @@ -33,6 +33,7 @@ void Instance::ToProto(admin::instance::v1::Instance* instance) const { instance->set_config(config_); instance->set_display_name(display_name_); instance->set_node_count(node_count_); + instance->set_processing_units(processing_units_); instance->mutable_labels()->insert(labels_.begin(), labels_.end()); // Instances are always in ready state. instance->set_state(admin::instance::v1::Instance::READY); diff --git a/frontend/entities/instance.h b/frontend/entities/instance.h index 065594ca..d5142e26 100644 --- a/frontend/entities/instance.h +++ b/frontend/entities/instance.h @@ -36,12 +36,13 @@ namespace frontend { class Instance { public: Instance(const std::string& name, const std::string config, - const std::string& display_name, int32_t node_count, Labels labels, - zetasql_base::Clock* clock) + const std::string& display_name, int32_t processing_units, + Labels labels, zetasql_base::Clock* clock) : name_(name), config_(config), display_name_(display_name), - node_count_(node_count), + node_count_(processing_units / 1000), + processing_units_(processing_units), labels_(labels) { auto current_time = clock->TimeNow(); create_time_ = current_time; @@ -67,6 +68,9 @@ class Instance { // The number of nodes in this instance. int32_t node_count_; + // The number of processing units in this instance. + int32_t processing_units_; + // The labels for this instance. Labels labels_; diff --git a/frontend/entities/instance_test.cc b/frontend/entities/instance_test.cc index 386bcbba..318e01b6 100644 --- a/frontend/entities/instance_test.cc +++ b/frontend/entities/instance_test.cc @@ -45,7 +45,7 @@ TEST(InstanceTest, Basic) { Instance instance(/*name=*/"projects/test-project/instances/test-instance", /*config=*/"emulator-config", /*display_name=*/"Test Instance", - /*node_count=*/5, + /*processing_units=*/5000, /*labels*/ labels, &clock); instance_api::Instance instance_pb; instance.ToProto(&instance_pb); diff --git a/frontend/handlers/BUILD b/frontend/handlers/BUILD index 2286348b..469f8f67 100644 --- a/frontend/handlers/BUILD +++ b/frontend/handlers/BUILD @@ -26,8 +26,8 @@ cc_library( "//backend/schema/ddl:operations_cc_proto", "//backend/schema/parser:ddl_parser", "//backend/schema/printer:print_ddl", + "//backend/schema/updater:schema_updater", "//common:errors", - "//common:limits", "//frontend/common:uris", "//frontend/converters:time", "//frontend/entities:database", @@ -36,7 +36,6 @@ cc_library( "@com_google_googleapis//google/longrunning:longrunning_cc_grpc", "@com_google_googleapis//google/spanner/admin/database/v1:database_cc_grpc", "@com_google_protobuf//:cc_wkt_protos", - "@com_googlesource_code_re2//:re2", ], alwayslink = 1, ) diff --git a/frontend/handlers/databases.cc b/frontend/handlers/databases.cc index 8c2d80e0..51db78a8 100644 --- a/frontend/handlers/databases.cc +++ b/frontend/handlers/databases.cc @@ -25,13 +25,12 @@ #include "backend/schema/ddl/operations.pb.h" #include "backend/schema/parser/ddl_parser.h" #include "backend/schema/printer/print_ddl.h" +#include "backend/schema/updater/schema_updater.h" #include "common/errors.h" -#include "common/limits.h" #include "frontend/common/uris.h" #include "frontend/converters/time.h" #include "frontend/entities/database.h" #include "frontend/server/handler.h" -#include "re2/re2.h" #include "zetasql/base/status_macros.h" namespace google { @@ -115,7 +114,9 @@ absl::Status CreateDatabase(RequestContext* ctx, } ZETASQL_ASSIGN_OR_RETURN(std::shared_ptr 
database, ctx->env()->database_manager()->CreateDatabase( - database_uri, create_statements)); + database_uri, backend::SchemaChangeOperation{ + .statements = create_statements, + })); // Create an operation tracking the database creation. ZETASQL_ASSIGN_OR_RETURN(std::shared_ptr operation, @@ -180,9 +181,11 @@ absl::Status UpdateDatabaseDdl( int num_succesful_statements; absl::Time commit_timestamp; absl::Status backfill_status; - ZETASQL_RETURN_IF_ERROR( - backend_database->UpdateSchema(statements, &num_succesful_statements, - &commit_timestamp, &backfill_status)); + ZETASQL_RETURN_IF_ERROR(backend_database->UpdateSchema( + backend::SchemaChangeOperation{ + .statements = statements, + }, + &num_succesful_statements, &commit_timestamp, &backfill_status)); // Populate ResultSet metadata. // For simplicity in emulator, we have implemented the schema updates in such diff --git a/frontend/server/BUILD b/frontend/server/BUILD index ef8f7953..82cddc77 100644 --- a/frontend/server/BUILD +++ b/frontend/server/BUILD @@ -39,9 +39,7 @@ cc_test( ":request_context", "//frontend/common:uris", "//tests/common:proto_matchers", - "//tests/common:test_env", "@com_github_grpc_grpc//:grpc++", - "@com_google_absl//absl/memory", "@com_google_absl//absl/strings", "@com_google_googleapis//google/spanner/admin/instance/v1:instance_cc_grpc", "@com_google_googletest//:gtest_main", diff --git a/frontend/server/request_context_test.cc b/frontend/server/request_context_test.cc index d9bcc8b8..11b8b72d 100644 --- a/frontend/server/request_context_test.cc +++ b/frontend/server/request_context_test.cc @@ -25,10 +25,8 @@ #include "gtest/gtest.h" #include "zetasql/base/testing/status_matchers.h" #include "tests/common/proto_matchers.h" -#include "absl/memory/memory.h" #include "absl/strings/str_cat.h" #include "frontend/common/uris.h" -#include "tests/common/test_env.h" namespace google { namespace spanner { @@ -51,18 +49,18 @@ class SessionExistenceTest : public testing::Test { absl::StrCat(instance_uri, "/databases/test-database"); // Create an instance. - instance_api::Instance instance_pb = PARSE_TEXT_PROTO(R"( + instance_api::Instance instance_pb = PARSE_TEXT_PROTO(R"pb( name: "projects/test-project/instances/test-instance" display_name: "" node_count: 3 - )"); + )pb"); ZETASQL_ASSERT_OK( env_->instance_manager()->CreateInstance(instance_uri, instance_pb)); // Create a database that belongs to the instance created above. - std::vector empty_schema; - ZETASQL_ASSERT_OK_AND_ASSIGN( - std::shared_ptr database, - env_->database_manager()->CreateDatabase(database_uri, empty_schema)); + backend::SchemaChangeOperation empty_schema_operation; + ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr database, + env_->database_manager()->CreateDatabase( + database_uri, empty_schema_operation)); // Create a session that belongs to the database created above. 
     ZETASQL_ASSERT_OK_AND_ASSIGN(std::shared_ptr session,
diff --git a/tests/common/proto_matchers.cc b/tests/common/proto_matchers.cc
index 2f5426a1..75f049a1 100644
--- a/tests/common/proto_matchers.cc
+++ b/tests/common/proto_matchers.cc
@@ -19,9 +19,9 @@
 #include
 #include

-#include "google/protobuf/io/tokenizer.h"
 #include "google/protobuf/text_format.h"
 #include "absl/strings/substitute.h"
+#include "google/protobuf/io/tokenizer.h"

 namespace google {
 namespace spanner {
diff --git a/tests/common/schema_constructor.h b/tests/common/schema_constructor.h
index 77ca095a..2ed1603f 100644
--- a/tests/common/schema_constructor.h
+++ b/tests/common/schema_constructor.h
@@ -49,7 +49,8 @@ CreateSchemaFromDDL(absl::Span statements,
       .column_id_generator = &column_id_gen,
   };
   backend::SchemaUpdater updater;
-  return updater.ValidateSchemaFromDDL(statements, context);
+  return updater.ValidateSchemaFromDDL(
+      backend::SchemaChangeOperation{.statements = statements}, context);
 }

 // Creates a schema with a single table and an index on the table.
diff --git a/tests/conformance/cases/column_default_value_read_write.cc b/tests/conformance/cases/column_default_value_read_write.cc
index 234e7eb6..f534c8bb 100644
--- a/tests/conformance/cases/column_default_value_read_write.cc
+++ b/tests/conformance/cases/column_default_value_read_write.cc
@@ -340,6 +340,63 @@ TEST_F(ColumnDefaultValueReadWriteTest, ForeignKey) {
                        testing::HasSubstr("Foreign key")));
 }

+TEST_F(ColumnDefaultValueReadWriteTest, DMLsUsingDefaultValues) {
+  ZETASQL_ASSERT_OK(
+      CommitDml({SqlStatement("INSERT T (K, D2) VALUES (1, 1000), (2, 2000)"),
+                 SqlStatement("UPDATE T SET D1 = 100 WHERE K = 2")}));
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{1, 1, 2, 1000}, {2, 100, 101, 2000}}));
+
+  ZETASQL_ASSERT_OK(CommitDml({SqlStatement("UPDATE T SET D1 = DEFAULT WHERE K = 2")}));
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{1, 1, 2, 1000}, {2, 1, 2, 2000}}));
+}
+
+TEST_F(ColumnDefaultValueReadWriteTest, DMLsInsertWithDefaultKeyword) {
+  ZETASQL_ASSERT_OK(CommitDml({SqlStatement(
+      "INSERT T (K, D1, D2) VALUES (3, 300, DEFAULT), (4, DEFAULT, 400)")}));
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{3, 300, 301, 2}, {4, 1, 2, 400}}));
+
+  // Try delete:
+  ZETASQL_ASSERT_OK(CommitDml({SqlStatement("DELETE FROM T WHERE D1=1")}));
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{3, 300, 301, 2}}));
+}
+
+TEST_F(ColumnDefaultValueReadWriteTest, InvalidUsesOfDefaultKeyword) {
+  ZETASQL_ASSERT_OK(
+      CommitDml({SqlStatement("INSERT T (K, D2) VALUES (1, 1000), (2, 2000)"),
+                 SqlStatement("UPDATE T SET D1 = 100 WHERE K = 2")}));
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{1, 1, 2, 1000}, {2, 100, 101, 2000}}));
+
+  ASSERT_THAT(CommitDml({SqlStatement("DELETE FROM T WHERE D1=DEFAULT")}),
+              StatusIs(absl::StatusCode::kInvalidArgument,
+                       testing::HasSubstr("Unexpected keyword DEFAULT")));
+
+  ASSERT_THAT(CommitDml({SqlStatement("UPDATE T SET D2 = 4 WHERE D1=DEFAULT")}),
+              StatusIs(absl::StatusCode::kInvalidArgument,
+                       testing::HasSubstr("Unexpected keyword DEFAULT")));
+
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{1, 1, 2, 1000}, {2, 100, 101, 2000}}));
+}
+
+TEST_F(ColumnDefaultValueReadWriteTest, DMLsInsertWithUserInput) {
+  ZETASQL_ASSERT_OK(CommitDml(
+      {SqlStatement("INSERT T2 (K, D1, D2) VALUES (5, 5, 5), (6, 6, 6)")}));
+  EXPECT_THAT(Query("SELECT K, D1, D2 FROM T2 ORDER BY K ASC"),
+              IsOkAndHoldsRows({{5, 5, 5}, {6, 6, 6}}));
+
+  // INSERT ... SELECT: D1 doesn't appear in the column list, so it gets its
+  // default value = 1, and G1 is computed from D1.
+  ZETASQL_ASSERT_OK(
+      CommitDml({SqlStatement("INSERT T (K, D2) SELECT K, D2 FROM T2")}));
+  EXPECT_THAT(Query("SELECT K, D1, G1, D2 FROM T ORDER BY K ASC"),
+              IsOkAndHoldsRows({{5, 1, 2, 5}, {6, 1, 2, 6}}));
+}
+
 class DefaultPrimaryKeyReadWriteTest : public DatabaseTest {
  public:
   absl::Status SetUpDatabase() override {
@@ -509,6 +566,20 @@ TEST_F(DefaultPrimaryKeyReadWriteTest, WithDependentGeneratedColumn) {
                                 {100, 1, 200, 1}, {100, 3, 200, 300}}));
 }

+
+TEST_F(DefaultPrimaryKeyReadWriteTest, DMLs) {
+  ZETASQL_ASSERT_OK(CommitDml({SqlStatement("INSERT T2 (id2, a) VALUES (1, 1)")}));
+  EXPECT_THAT(Query("SELECT id1, id2, g1, a FROM T2 ORDER BY id2 ASC"),
+              IsOkAndHoldsRows({{100, 1, 200, 1}}));
+
+  ZETASQL_ASSERT_OK(
+      CommitDml({SqlStatement("INSERT T2 (id1, id2, a) VALUES "
+                              "(DEFAULT, 2, 2), (3, 3, 3)")}));
+  EXPECT_THAT(
+      Query("SELECT id1, id2, g1, a FROM T2 ORDER BY id2 ASC"),
+      IsOkAndHoldsRows({{100, 1, 200, 1}, {100, 2, 200, 2}, {3, 3, 6, 3}}));
+}
+
 }  // namespace

 }  // namespace test
diff --git a/tests/conformance/cases/information_schema.cc b/tests/conformance/cases/information_schema.cc
index a27a872c..eb4ecbea 100644
--- a/tests/conformance/cases/information_schema.cc
+++ b/tests/conformance/cases/information_schema.cc
@@ -100,6 +100,7 @@ class InformationSchemaTest : public DatabaseTest {
         "CHANGE_STREAMS",
         "CHANGE_STREAM_COLUMNS",
         "CHANGE_STREAM_OPTIONS",
+        "CHANGE_STREAM_PRIVILEGES",
         "CHANGE_STREAM_TABLES",
         "VIEWS",
         "ROLES",
diff --git a/tests/conformance/cases/snapshot_reads.cc b/tests/conformance/cases/snapshot_reads.cc
index cd6c4ddd..86fdf1bb 100644
--- a/tests/conformance/cases/snapshot_reads.cc
+++ b/tests/conformance/cases/snapshot_reads.cc
@@ -46,36 +46,60 @@ class SnapshotReadsTest : public DatabaseTest {

 TEST_F(SnapshotReadsTest, CanReadWithMinTimestampBound) {
   // Insert a few rows.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", "23"}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   // Read using a min_timestamp bounded staleness.
-  EXPECT_THAT(
-      Read(Transaction::SingleUseOptions(
-               MakePastTimestamp(std::chrono::minutes(10))),
-           "Users", {"ID", "Name", "Age"}, KeySet::All()),
-      IsOkAndHoldsRows({ValueRow{1, "John", 23}, ValueRow{2, "Peter", 41}}));
+  auto result = Read(Transaction::SingleUseOptions(
+                         MakePastTimestamp(std::chrono::minutes(10))),
+                     "Users", {"ID", "Name", "Age"}, KeySet::All());
+  EXPECT_THAT(result, zetasql_base::testing::IsOk());
+  // Bounded staleness reads can return an empty set or a subset of the rows in
+  // their committed order. With a bounded staleness of 10 mins, the reads can
+  // return an empty set. When a non-empty set is returned, we want to ensure
+  // that the commit order is still respected, and the first row always exists
+  // in the results.
+  if (!result.value().empty()) {
+    EXPECT_THAT(result.value(),
+                testing::IsSupersetOf({ValueRow{1, "John", 23}}));
+  }
+  // Ensure that the bounded staleness read does not return any other data,
+  // and only returns these two rows.
+  EXPECT_THAT(result.value(), testing::IsSubsetOf({ValueRow{1, "John", 23},
+                                                    ValueRow{2, "Peter", 41}}));
 }

 TEST_F(SnapshotReadsTest, CanReadWithMaxStalenessBound) {
   // Insert a few rows.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
-
-  // Read using a max statleness bound.
-  EXPECT_THAT(
-      Read(Transaction::SingleUseOptions(std::chrono::minutes(10)),
-           "Users", {"ID", "Name", "Age"}, KeySet::All()),
-      IsOkAndHoldsRows({ValueRow{1, "John", 23}, ValueRow{2, "Peter", 41}}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+
+  // Read using a max staleness bound.
+  auto result = Read(Transaction::SingleUseOptions(std::chrono::minutes(10)),
+                     "Users", {"ID", "Name", "Age"}, KeySet::All());
+  EXPECT_THAT(result, zetasql_base::testing::IsOk());
+  // Bounded staleness reads can return an empty set or a subset of the rows in
+  // their committed order. With a bounded staleness of 10 mins, the reads can
+  // return an empty set. When a non-empty set is returned, we want to ensure
+  // that the commit order is still respected, and the first row always exists
+  // in the results.
+  if (!result.value().empty()) {
+    EXPECT_THAT(result.value(),
+                testing::IsSupersetOf({ValueRow{1, "John", 23}}));
+  }
+  // Ensure that the bounded staleness read does not return any other data,
+  // and only returns these two rows.
+  EXPECT_THAT(result.value(), testing::IsSubsetOf({ValueRow{1, "John", 23},
+                                                    ValueRow{2, "Peter", 41}}));
 }

 TEST_F(SnapshotReadsTest, CanReadWithExactTimestamp) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
   // Sleep for 2s, and then insert another row.
   absl::SleepFor(absl::Seconds(2));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   // Read using an exact timestamp option set at 1s in the past. Only row 1
   // is visible at that timestamp.
@@ -87,11 +111,11 @@ TEST_F(SnapshotReadsTest, CanReadWithExactTimestamp) {

 TEST_F(SnapshotReadsTest, CanReadWithExactStaleness) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
   // Sleep for 2s, and then insert another row.
   absl::SleepFor(absl::Seconds(2));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   // Read using an exact staleness option set to 1s in the past. Only
   // row 1 is visible at that timestamp.
@@ -103,8 +127,8 @@ TEST_F(SnapshotReadsTest, CanReadWithExactStaleness) {

 TEST_F(SnapshotReadsTest, CanReadWithExactTimestampInFuture) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   // Read using an exact timestamp option set to 100 ms in the future. Able to
   // read all the rows, but will wait for ~100 ms to pass before returning. Use
@@ -122,8 +146,8 @@ TEST_F(SnapshotReadsTest, CanReadWithExactTimestampInFuture) {

 TEST_F(SnapshotReadsTest, CanReadWithMinTimestampBoundInFuture) {
   // Insert a few rows.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", "23"}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", "23"}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   // Read using a min_timestamp bound set to 100 ms in future. Able to read all
   // rows, but will wait for ~100 ms to pass before returning. Use a larger time
@@ -141,8 +165,8 @@ TEST_F(SnapshotReadsTest, CanReadWithMinTimestampBoundInFuture) {

 TEST_F(SnapshotReadsTest, CannnotReadWithExactTimestampTooFarInFuture) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   if (!in_prod_env()) {
     EXPECT_THAT(Read(Transaction::SingleUseOptions(Transaction::ReadOnlyOptions(
@@ -154,8 +178,8 @@ TEST_F(SnapshotReadsTest, CannnotReadWithExactTimestampTooFarInFuture) {

 TEST_F(SnapshotReadsTest, CannnotQueryWithExactTimestampTooFarInFuture) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   if (!in_prod_env()) {
     EXPECT_THAT(QuerySingleUseTransaction(
@@ -168,8 +192,8 @@ TEST_F(SnapshotReadsTest, CannnotQueryWithExactTimestampTooFarInFuture) {

 TEST_F(SnapshotReadsTest, CannnotReadWithMinTimestampBoundTooFarInFuture) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   if (!in_prod_env()) {
     EXPECT_THAT(Read(Transaction::SingleUseOptions(
@@ -181,8 +205,8 @@ TEST_F(SnapshotReadsTest, CannnotReadWithMinTimestampBoundTooFarInFuture) {

 TEST_F(SnapshotReadsTest, CannnotQueryWithMinTimestampBoundTooFarInFuture) {
   // Insert a row.
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
-  ZETASQL_EXPECT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {1, "John", 23}));
+  ZETASQL_ASSERT_OK(Insert("Users", {"ID", "Name", "Age"}, {2, "Peter", 41}));

   if (!in_prod_env()) {
     EXPECT_THAT(QuerySingleUseTransaction(
diff --git a/tests/gcloud/instance_admin_test.py b/tests/gcloud/instance_admin_test.py
index ce4dc2f4..f6133be4 100644
--- a/tests/gcloud/instance_admin_test.py
+++ b/tests/gcloud/instance_admin_test.py
@@ -57,12 +57,15 @@ def testListInstances(self):
     self.assertEqual(
         self.RunGCloud(
             # TODO: Consider adding column instanceType.
-            'spanner', 'instances', 'list', '--format',
+            'spanner',
+            'instances',
+            'list',
+            '--format',
             'table(name, displayName, config, nodeCount, processingUnits, state)'
         ),
         self.JoinLines(
             'NAME DISPLAY_NAME CONFIG NODE_COUNT PROCESSING_UNITS STATE',
-            'test-instance Test Instance emulator-config 3 READY'
+            'test-instance Test Instance emulator-config 3 3000 READY'
         ))

   def testDescribeInstance(self):
@@ -76,7 +79,7 @@ def testDescribeInstance(self):
             'config: projects/test-project/instanceConfigs/emulator-config',
             r'createTime: {}'.format(time_format), 'displayName: Test Instance',
             'name: projects/test-project/instances/test-instance',
-            'nodeCount: 3', 'state: READY',
+            'nodeCount: 3', 'processingUnits: 3000', 'state: READY',
             r'updateTime: {}'.format(time_format)))

   def testDeleteInstance(self):